Dataset schema (one row per source file):

| column | type | range |
|---|---|---|
| repo | string (nullable) | 1 – 191 chars |
| file | string | 23 – 351 chars |
| code | string | 0 – 5.32M chars |
| file_length | int64 | 0 – 5.32M |
| avg_line_length | float64 | 0 – 2.9k |
| max_line_length | int64 | 0 – 288k |
| extension_type | string (1 class) | java |

---

repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/DataGenerator.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.loadGenerator;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* This program reads the directory structure and file structure from
* the input directory and creates the namespace in the file system
* specified by the configuration, under the specified root.
* All the files are filled with 'a'.
*
* The synopsis of the command is
* java DataGenerator
* -inDir <inDir>: input directory name where directory/file structures
* are stored. Its default value is the current directory.
* -root <root>: the name of the root directory which the new namespace
* is going to be placed under.
* Its default value is "/testLoadSpace".
*/
public class DataGenerator extends Configured implements Tool {
private File inDir = StructureGenerator.DEFAULT_STRUCTURE_DIRECTORY;
private Path root = DEFAULT_ROOT;
private FileContext fc;
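/** Number of bytes represented by one "block" when converting the
* block-denominated sizes in the file-structure input into byte counts;
* a tiny test-scale value. */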
final static private long BLOCK_SIZE = 10;
final static private String USAGE = "java DataGenerator " +
"-inDir <inDir> " +
"-root <root>";
/** default name of the root where the test namespace will be placed under */
final static Path DEFAULT_ROOT = new Path("/testLoadSpace");
/** Main function.
* It first parses the command line arguments.
* It then reads the directory structure from the input directory
* structure file and creates directory structure in the file system
* namespace. Afterwards it reads the file attributes and creates files
* in the file system. All file content is filled with 'a'.
*/
@Override
public int run(String[] args) throws Exception {
int exitCode = 0;
exitCode = init(args);
if (exitCode != 0) {
return exitCode;
}
genDirStructure();
genFiles();
return exitCode;
}
/** Parse the command line arguments and initialize the data */
private int init(String[] args) {
try { // initialize file system handle
fc = FileContext.getFileContext(getConf());
} catch (IOException ioe) {
System.err.println("Can not initialize the file system: " +
ioe.getLocalizedMessage());
return -1;
}
for (int i = 0; i < args.length; i++) { // parse command line
if (args[i].equals("-root")) {
root = new Path(args[++i]);
} else if (args[i].equals("-inDir")) {
inDir = new File(args[++i]);
} else {
System.err.println(USAGE);
ToolRunner.printGenericCommandUsage(System.err);
System.exit(-1);
}
}
return 0;
}
/** Read directory structure file under the input directory.
* Create each directory under the specified root.
* The directory names are relative to the specified root.
*/
private void genDirStructure() throws IOException {
BufferedReader in = new BufferedReader(
new FileReader(new File(inDir,
StructureGenerator.DIR_STRUCTURE_FILE_NAME)));
String line;
while ((line=in.readLine()) != null) {
fc.mkdir(new Path(root+line), FileContext.DEFAULT_PERM, true);
}
}
/** Read file structure file under the input directory.
* Create each file under the specified root.
* The file names are relative to the root.
*/
private void genFiles() throws IOException {
BufferedReader in = new BufferedReader(
new FileReader(new File(inDir,
StructureGenerator.FILE_STRUCTURE_FILE_NAME)));
String line;
while ((line=in.readLine()) != null) {
String[] tokens = line.split(" ");
if (tokens.length != 2) {
throw new IOException("Expect exactly 2 tokens per line: " + line);
}
String fileName = root+tokens[0];
long fileSize = (long)(BLOCK_SIZE*Double.parseDouble(tokens[1]));
genFile(new Path(fileName), fileSize);
}
}
/** Create a file with the name <code>file</code> and
* a length of <code>fileSize</code>. The file is filled with character 'a'.
*/
private void genFile(Path file, long fileSize) throws IOException {
FSDataOutputStream out = fc.create(file,
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
CreateOpts.createParent(), CreateOpts.bufferSize(4096),
CreateOpts.repFac((short) 3));
for(long i=0; i<fileSize; i++) {
out.writeByte('a');
}
out.close();
}
/** Main program.
*
* @param args Command line arguments
* @throws Exception
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(),
new DataGenerator(), args);
System.exit(res);
}
}
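Note: DataGenerator is designed to consume the output of StructureGenerator (the next file in this dump). Below is a minimal sketch of driving the two tools end to end from one entry point; it reuses the same package, and the /tmp/struct scratch directory is a hypothetical path that must already exist:

package org.apache.hadoop.fs.loadGenerator;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

public class LoadSpaceDemo {
  public static void main(String[] args) throws Exception {
    // Step 1: write dirStructure/fileStructure description files into
    // /tmp/struct (hypothetical directory; must exist beforehand).
    int rc = new StructureGenerator().run(new String[] {
        "-maxDepth", "3", "-numOfFiles", "20",
        "-outDir", "/tmp/struct", "-seed", "42"});
    if (rc != 0) {
      System.exit(rc);
    }
    // Step 2: replay that structure under /testLoadSpace in the file
    // system named by the default configuration.
    System.exit(ToolRunner.run(new Configuration(), new DataGenerator(),
        new String[] {"-inDir", "/tmp/struct", "-root", "/testLoadSpace"}));
  }
}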
[file_length: 5,856 | avg_line_length: 34.49697 | max_line_length: 79 | extension_type: java]

---

repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/StructureGenerator.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.loadGenerator;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.util.ToolRunner;
/**
* This program generates a random namespace structure with the following
* constraints:
* 1. The number of subdirectories is a random number in [minWidth, maxWidth].
* 2. The maximum depth of each subdirectory is a random number in
* [2*maxDepth/3, maxDepth].
* 3. Files are randomly placed in the empty (leaf) directories. The size of
* each file follows a Gaussian distribution.
* The generated namespace structure is described by two files in the output
* directory. Each line of the first file
* contains the full name of a leaf directory.
* Each line of the second file contains
* the full name of a file and its size, separated by a blank.
*
* The synopsis of the command is
* java StructureGenerator
-maxDepth <maxDepth> : maximum depth of the directory tree; default is 5.
-minWidth <minWidth> : minimum number of subdirectories per directory; default is 1.
-maxWidth <maxWidth> : maximum number of subdirectories per directory; default is 5.
-numOfFiles <#OfFiles> : the total number of files; default is 10.
-avgFileSize <avgFileSizeInBlocks>: average file size in blocks; default is 1.
-outDir <outDir>: output directory; default is the current directory.
-seed <seed>: random number generator seed; default is the current time.
*/
public class StructureGenerator {
private int maxDepth = 5;
private int minWidth = 1;
private int maxWidth = 5;
private int numOfFiles = 10;
private double avgFileSize = 1;
private File outDir = DEFAULT_STRUCTURE_DIRECTORY;
final static private String USAGE = "java StructureGenerator\n" +
"-maxDepth <maxDepth>\n" +
"-minWidth <minWidth>\n" +
"-maxWidth <maxWidth>\n" +
"-numOfFiles <#OfFiles>\n" +
"-avgFileSize <avgFileSizeInBlocks>\n" +
"-outDir <outDir>\n" +
"-seed <seed>";
private Random r = null;
/** Default directory for storing file/directory structure */
final static File DEFAULT_STRUCTURE_DIRECTORY = new File(".");
/** The name of the file for storing directory structure */
final static String DIR_STRUCTURE_FILE_NAME = "dirStructure";
/** The name of the file for storing file structure */
final static String FILE_STRUCTURE_FILE_NAME = "fileStructure";
/** The name prefix for the files created by this program */
final static String FILE_NAME_PREFIX = "_file_";
/**
* The main function first parses the command line arguments,
* then generates in-memory directory structure and outputs to a file,
* last generates in-memory files and outputs them to a file.
*/
public int run(String[] args) throws Exception {
int exitCode = 0;
exitCode = init(args);
if (exitCode != 0) {
return exitCode;
}
genDirStructure();
output(new File(outDir, DIR_STRUCTURE_FILE_NAME));
genFileStructure();
outputFiles(new File(outDir, FILE_STRUCTURE_FILE_NAME));
return exitCode;
}
/** Parse the command line arguments and initialize the data */
private int init(String[] args) {
try {
for (int i = 0; i < args.length; i++) { // parse command line
if (args[i].equals("-maxDepth")) {
maxDepth = Integer.parseInt(args[++i]);
if (maxDepth<1) {
System.err.println("maxDepth must be positive: " + maxDepth);
return -1;
}
} else if (args[i].equals("-minWidth")) {
minWidth = Integer.parseInt(args[++i]);
if (minWidth<0) {
System.err.println("minWidth must be positive: " + minWidth);
return -1;
}
} else if (args[i].equals("-maxWidth")) {
maxWidth = Integer.parseInt(args[++i]);
} else if (args[i].equals("-numOfFiles")) {
numOfFiles = Integer.parseInt(args[++i]);
if (numOfFiles<1) {
System.err.println("NumOfFiles must be positive: " + numOfFiles);
return -1;
}
} else if (args[i].equals("-avgFileSize")) {
avgFileSize = Double.parseDouble(args[++i]);
if (avgFileSize<=0) {
System.err.println("AvgFileSize must be positive: " + avgFileSize);
return -1;
}
} else if (args[i].equals("-outDir")) {
outDir = new File(args[++i]);
} else if (args[i].equals("-seed")) {
r = new Random(Long.parseLong(args[++i]));
} else {
System.err.println(USAGE);
ToolRunner.printGenericCommandUsage(System.err);
return -1;
}
}
} catch (NumberFormatException e) {
System.err.println("Illegal parameter: " + e.getLocalizedMessage());
System.err.println(USAGE);
return -1;
}
if (maxWidth < minWidth) {
System.err.println(
"maxWidth must not be less than minWidth: " + maxWidth);
return -1;
}
if (r==null) {
r = new Random();
}
return 0;
}
/** In memory representation of a directory */
private static class INode {
private String name;
private List<INode> children = new ArrayList<INode>();
/** Constructor */
private INode(String name) {
this.name = name;
}
/** Add a child (subdir/file) */
private void addChild(INode child) {
children.add(child);
}
/** Output the subtree rooted at the current node.
* Only the leaves are printed.
*/
private void output(PrintStream out, String prefix) {
prefix = prefix==null?name:prefix+"/"+name;
if (children.isEmpty()) {
out.println(prefix);
} else {
for (INode child : children) {
child.output(out, prefix);
}
}
}
/** Output the files in the subtree rooted at this node */
protected void outputFiles(PrintStream out, String prefix) {
prefix = prefix==null?name:prefix+"/"+name;
for (INode child : children) {
child.outputFiles(out, prefix);
}
}
/** Add all the leaves in the subtree to the input list */
private void getLeaves(List<INode> leaves) {
if (children.isEmpty()) {
leaves.add(this);
} else {
for (INode child : children) {
child.getLeaves(leaves);
}
}
}
}
/** In memory representation of a file */
private static class FileINode extends INode {
private double numOfBlocks;
/** constructor */
private FileINode(String name, double numOfBlocks) {
super(name);
this.numOfBlocks = numOfBlocks;
}
/** Output a file attribute */
@Override
protected void outputFiles(PrintStream out, String prefix) {
prefix = (prefix == null)?super.name: prefix + "/"+super.name;
out.println(prefix + " " + numOfBlocks);
}
}
private INode root;
/** Generates a directory tree with a max depth of <code>maxDepth</code> */
private void genDirStructure() {
root = genDirStructure("", maxDepth);
}
/** Generate a directory tree rooted at <code>rootName</code>.
* The number of subtrees is in the range [minWidth, maxWidth].
* The maximum depth of each subtree is in the range of
* [2*maxDepth/3, maxDepth].
*/
private INode genDirStructure(String rootName, int maxDepth) {
INode root = new INode(rootName);
if (maxDepth>0) {
maxDepth--;
int minDepth = maxDepth*2/3;
// Figure out the number of subdirectories to generate
int numOfSubDirs = minWidth + r.nextInt(maxWidth-minWidth+1);
// Expand the tree
for (int i=0; i<numOfSubDirs; i++) {
int childDepth = (maxDepth == 0)?0:
(r.nextInt(maxDepth-minDepth+1)+minDepth);
INode child = genDirStructure("dir"+i, childDepth);
root.addChild(child);
}
}
return root;
}
/** Collects leaf nodes in the tree */
private List<INode> getLeaves() {
List<INode> leafDirs = new ArrayList<INode>();
root.getLeaves(leafDirs);
return leafDirs;
}
/** Decides where to place each file and what its length will be.
* It first collects all empty directories in the tree.
* For each file, it randomly chooses an empty directory to place the file.
* The file's length is generated using Gaussian distribution.
*/
private void genFileStructure() {
List<INode> leaves = getLeaves();
int totalLeaves = leaves.size();
for (int i=0; i<numOfFiles; i++) {
int leafIdx = r.nextInt(totalLeaves);
double fileSize;
do {
fileSize = r.nextGaussian()+avgFileSize;
} while (fileSize<0);
leaves.get(leafIdx).addChild(
new FileINode(FILE_NAME_PREFIX+i, fileSize));
}
}
/** Output directory structure to a file, each line of the file
* contains the directory name. Only empty directory names are printed. */
private void output(File outFile) throws FileNotFoundException {
System.out.println("Printing to " + outFile.toString());
PrintStream out = new PrintStream(outFile);
root.output(out, null);
out.close();
}
/** Output all files' attributes to a file, each line of the output file
* contains a file name and its length. */
private void outputFiles(File outFile) throws FileNotFoundException {
System.out.println("Printing to " + outFile.toString());
PrintStream out = new PrintStream(outFile);
root.outputFiles(out, null);
out.close();
}
/**
* Main program
* @param args Command line arguments
* @throws Exception
*/
public static void main(String[] args) throws Exception {
StructureGenerator sg = new StructureGenerator();
System.exit(sg.run(args));
}
}
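For orientation, a small run might leave output files shaped like the following (illustrative values only; actual names and sizes depend on the seed). dirStructure holds one leaf directory per line; fileStructure holds a file name and its size in blocks, separated by a blank:

dirStructure:
/dir0/dir0
/dir0/dir1
/dir1

fileStructure:
/dir0/dir0/_file_0 1.2543
/dir0/dir1/_file_1 0.8731
/dir1/_file_2 1.0412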
[file_length: 10,640 | avg_line_length: 33.436893 | max_line_length: 89 | extension_type: java]

---

repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.ftp;
import org.apache.commons.net.ftp.FTP;
import org.junit.Assert;
import org.junit.Test;
public class TestFTPFileSystem {
@Test
public void testFTPDefaultPort() throws Exception {
FTPFileSystem ftp = new FTPFileSystem();
Assert.assertEquals(FTP.DEFAULT_PORT, ftp.getDefaultPort());
}
}
[file_length: 1,155 | avg_line_length: 34.030303 | max_line_length: 76 | extension_type: java]

---

repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.permission;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import junit.framework.TestCase;
import static org.apache.hadoop.fs.permission.FsAction.*;
public class TestFsPermission extends TestCase {
public void testFsAction() {
//implies
for(FsAction a : FsAction.values()) {
assertTrue(ALL.implies(a));
}
for(FsAction a : FsAction.values()) {
assertTrue(a == NONE? NONE.implies(a): !NONE.implies(a));
}
for(FsAction a : FsAction.values()) {
assertTrue(a == READ_EXECUTE || a == READ || a == EXECUTE || a == NONE?
READ_EXECUTE.implies(a): !READ_EXECUTE.implies(a));
}
//masks
assertEquals(EXECUTE, EXECUTE.and(READ_EXECUTE));
assertEquals(READ, READ.and(READ_EXECUTE));
assertEquals(NONE, WRITE.and(READ_EXECUTE));
assertEquals(READ, READ_EXECUTE.and(READ_WRITE));
assertEquals(NONE, READ_EXECUTE.and(WRITE));
assertEquals(WRITE_EXECUTE, ALL.and(WRITE_EXECUTE));
}
/**
* Ensure that when manually specifying permission modes we get
* the expected values back out for all combinations
*/
public void testConvertingPermissions() {
for(short s = 0; s <= 01777; s++) {
assertEquals(s, new FsPermission(s).toShort());
}
short s = 0;
for(boolean sb : new boolean [] { false, true }) {
for(FsAction u : FsAction.values()) {
for(FsAction g : FsAction.values()) {
for(FsAction o : FsAction.values()) {
// Cover constructor with sticky bit.
FsPermission f = new FsPermission(u, g, o, sb);
assertEquals(s, f.toShort());
FsPermission f2 = new FsPermission(f);
assertEquals(s, f2.toShort());
s++;
}
}
}
}
}
public void testSpecialBitsToString() {
for (boolean sb : new boolean[] { false, true }) {
for (FsAction u : FsAction.values()) {
for (FsAction g : FsAction.values()) {
for (FsAction o : FsAction.values()) {
FsPermission f = new FsPermission(u, g, o, sb);
String fString = f.toString();
// Check that sticky bit is represented correctly.
if (f.getStickyBit() && f.getOtherAction().implies(EXECUTE))
assertEquals('t', fString.charAt(8));
else if (f.getStickyBit() && !f.getOtherAction().implies(EXECUTE))
assertEquals('T', fString.charAt(8));
else if (!f.getStickyBit() && f.getOtherAction().implies(EXECUTE))
assertEquals('x', fString.charAt(8));
else
assertEquals('-', fString.charAt(8));
assertEquals(9, fString.length());
}
}
}
}
}
public void testFsPermission() {
String symbolic = "-rwxrwxrwx";
for(int i = 0; i < (1 << 10); i++) {
StringBuilder b = new StringBuilder("----------");
String binary = String.format("%11s", Integer.toBinaryString(i));
String permBinary = binary.substring(2, binary.length());
int len = permBinary.length();
for(int j = 0; j < len; j++) {
if (permBinary.charAt(j) == '1') {
int k = 9 - (len - 1 - j);
b.setCharAt(k, symbolic.charAt(k));
}
}
// Check for sticky bit.
if (binary.charAt(1) == '1') {
char replacement = b.charAt(9) == 'x' ? 't' : 'T';
b.setCharAt(9, replacement);
}
assertEquals(i, FsPermission.valueOf(b.toString()).toShort());
}
}
public void testUMaskParser() throws IOException {
Configuration conf = new Configuration();
// Ensure that we get the right octal values back for all legal values
for(FsAction u : FsAction.values()) {
for(FsAction g : FsAction.values()) {
for(FsAction o : FsAction.values()) {
FsPermission f = new FsPermission(u, g, o);
String asOctal = String.format("%1$03o", f.toShort());
conf.set(FsPermission.UMASK_LABEL, asOctal);
FsPermission fromConf = FsPermission.getUMask(conf);
assertEquals(f, fromConf);
}
}
}
}
public void testSymbolicUmasks() {
Configuration conf = new Configuration();
// Test some symbolic to octal settings
// Symbolic umask list is generated in a linux shell using the command:
// umask 0; umask <octal number>; umask -S
String [][] symbolic = new String [][] {
{"a+rw", "111",},
{"u=rwx,g=rwx,o=rwx", "0",},
{"u=rwx,g=rwx,o=rw", "1",},
{"u=rwx,g=rwx,o=rx", "2",},
{"u=rwx,g=rwx,o=r", "3",},
{"u=rwx,g=rwx,o=wx", "4",},
{"u=rwx,g=rwx,o=w", "5",},
{"u=rwx,g=rwx,o=x", "6",},
{"u=rwx,g=rwx,o=", "7",},
{"u=rwx,g=rw,o=rwx", "10",},
{"u=rwx,g=rw,o=rw", "11",},
{"u=rwx,g=rw,o=rx", "12",},
{"u=rwx,g=rw,o=r", "13",},
{"u=rwx,g=rw,o=wx", "14",},
{"u=rwx,g=rw,o=w", "15",},
{"u=rwx,g=rw,o=x", "16",},
{"u=rwx,g=rw,o=", "17",},
{"u=rwx,g=rx,o=rwx", "20",},
{"u=rwx,g=rx,o=rw", "21",},
{"u=rwx,g=rx,o=rx", "22",},
{"u=rwx,g=rx,o=r", "23",},
{"u=rwx,g=rx,o=wx", "24",},
{"u=rwx,g=rx,o=w", "25",},
{"u=rwx,g=rx,o=x", "26",},
{"u=rwx,g=rx,o=", "27",},
{"u=rwx,g=r,o=rwx", "30",},
{"u=rwx,g=r,o=rw", "31",},
{"u=rwx,g=r,o=rx", "32",},
{"u=rwx,g=r,o=r", "33",},
{"u=rwx,g=r,o=wx", "34",},
{"u=rwx,g=r,o=w", "35",},
{"u=rwx,g=r,o=x", "36",},
{"u=rwx,g=r,o=", "37",},
{"u=rwx,g=wx,o=rwx", "40",},
{"u=rwx,g=wx,o=rw", "41",},
{"u=rwx,g=wx,o=rx", "42",},
{"u=rwx,g=wx,o=r", "43",},
{"u=rwx,g=wx,o=wx", "44",},
{"u=rwx,g=wx,o=w", "45",},
{"u=rwx,g=wx,o=x", "46",},
{"u=rwx,g=wx,o=", "47",},
{"u=rwx,g=w,o=rwx", "50",},
{"u=rwx,g=w,o=rw", "51",},
{"u=rwx,g=w,o=rx", "52",},
{"u=rwx,g=w,o=r", "53",},
{"u=rwx,g=w,o=wx", "54",},
{"u=rwx,g=w,o=w", "55",},
{"u=rwx,g=w,o=x", "56",},
{"u=rwx,g=w,o=", "57",},
{"u=rwx,g=x,o=rwx", "60",},
{"u=rwx,g=x,o=rw", "61",},
{"u=rwx,g=x,o=rx", "62",},
{"u=rwx,g=x,o=r", "63",},
{"u=rwx,g=x,o=wx", "64",},
{"u=rwx,g=x,o=w", "65",},
{"u=rwx,g=x,o=x", "66",},
{"u=rwx,g=x,o=", "67",},
{"u=rwx,g=,o=rwx", "70",},
{"u=rwx,g=,o=rw", "71",},
{"u=rwx,g=,o=rx", "72",},
{"u=rwx,g=,o=r", "73",},
{"u=rwx,g=,o=wx", "74",},
{"u=rwx,g=,o=w", "75",},
{"u=rwx,g=,o=x", "76",},
{"u=rwx,g=,o=", "77",},
{"u=rw,g=rwx,o=rwx", "100",},
{"u=rw,g=rwx,o=rw", "101",},
{"u=rw,g=rwx,o=rx", "102",},
{"u=rw,g=rwx,o=r", "103",},
{"u=rw,g=rwx,o=wx", "104",},
{"u=rw,g=rwx,o=w", "105",},
{"u=rw,g=rwx,o=x", "106",},
{"u=rw,g=rwx,o=", "107",},
{"u=rw,g=rw,o=rwx", "110",},
{"u=rw,g=rw,o=rw", "111",},
{"u=rw,g=rw,o=rx", "112",},
{"u=rw,g=rw,o=r", "113",},
{"u=rw,g=rw,o=wx", "114",},
{"u=rw,g=rw,o=w", "115",},
{"u=rw,g=rw,o=x", "116",},
{"u=rw,g=rw,o=", "117",},
{"u=rw,g=rx,o=rwx", "120",},
{"u=rw,g=rx,o=rw", "121",},
{"u=rw,g=rx,o=rx", "122",},
{"u=rw,g=rx,o=r", "123",},
{"u=rw,g=rx,o=wx", "124",},
{"u=rw,g=rx,o=w", "125",},
{"u=rw,g=rx,o=x", "126",},
{"u=rw,g=rx,o=", "127",},
{"u=rw,g=r,o=rwx", "130",},
{"u=rw,g=r,o=rw", "131",},
{"u=rw,g=r,o=rx", "132",},
{"u=rw,g=r,o=r", "133",},
{"u=rw,g=r,o=wx", "134",},
{"u=rw,g=r,o=w", "135",},
{"u=rw,g=r,o=x", "136",},
{"u=rw,g=r,o=", "137",},
{"u=rw,g=wx,o=rwx", "140",},
{"u=rw,g=wx,o=rw", "141",},
{"u=rw,g=wx,o=rx", "142",},
{"u=rw,g=wx,o=r", "143",},
{"u=rw,g=wx,o=wx", "144",},
{"u=rw,g=wx,o=w", "145",},
{"u=rw,g=wx,o=x", "146",},
{"u=rw,g=wx,o=", "147",},
{"u=rw,g=w,o=rwx", "150",},
{"u=rw,g=w,o=rw", "151",},
{"u=rw,g=w,o=rx", "152",},
{"u=rw,g=w,o=r", "153",},
{"u=rw,g=w,o=wx", "154",},
{"u=rw,g=w,o=w", "155",},
{"u=rw,g=w,o=x", "156",},
{"u=rw,g=w,o=", "157",},
{"u=rw,g=x,o=rwx", "160",},
{"u=rw,g=x,o=rw", "161",},
{"u=rw,g=x,o=rx", "162",},
{"u=rw,g=x,o=r", "163",},
{"u=rw,g=x,o=wx", "164",},
{"u=rw,g=x,o=w", "165",},
{"u=rw,g=x,o=x", "166",},
{"u=rw,g=x,o=", "167",},
{"u=rw,g=,o=rwx", "170",},
{"u=rw,g=,o=rw", "171",},
{"u=rw,g=,o=rx", "172",},
{"u=rw,g=,o=r", "173",},
{"u=rw,g=,o=wx", "174",},
{"u=rw,g=,o=w", "175",},
{"u=rw,g=,o=x", "176",},
{"u=rw,g=,o=", "177",},
{"u=rx,g=rwx,o=rwx", "200",},
{"u=rx,g=rwx,o=rw", "201",},
{"u=rx,g=rwx,o=rx", "202",},
{"u=rx,g=rwx,o=r", "203",},
{"u=rx,g=rwx,o=wx", "204",},
{"u=rx,g=rwx,o=w", "205",},
{"u=rx,g=rwx,o=x", "206",},
{"u=rx,g=rwx,o=", "207",},
{"u=rx,g=rw,o=rwx", "210",},
{"u=rx,g=rw,o=rw", "211",},
{"u=rx,g=rw,o=rx", "212",},
{"u=rx,g=rw,o=r", "213",},
{"u=rx,g=rw,o=wx", "214",},
{"u=rx,g=rw,o=w", "215",},
{"u=rx,g=rw,o=x", "216",},
{"u=rx,g=rw,o=", "217",},
{"u=rx,g=rx,o=rwx", "220",},
{"u=rx,g=rx,o=rw", "221",},
{"u=rx,g=rx,o=rx", "222",},
{"u=rx,g=rx,o=r", "223",},
{"u=rx,g=rx,o=wx", "224",},
{"u=rx,g=rx,o=w", "225",},
{"u=rx,g=rx,o=x", "226",},
{"u=rx,g=rx,o=", "227",},
{"u=rx,g=r,o=rwx", "230",},
{"u=rx,g=r,o=rw", "231",},
{"u=rx,g=r,o=rx", "232",},
{"u=rx,g=r,o=r", "233",},
{"u=rx,g=r,o=wx", "234",},
{"u=rx,g=r,o=w", "235",},
{"u=rx,g=r,o=x", "236",},
{"u=rx,g=r,o=", "237",},
{"u=rx,g=wx,o=rwx", "240",},
{"u=rx,g=wx,o=rw", "241",},
{"u=rx,g=wx,o=rx", "242",},
{"u=rx,g=wx,o=r", "243",},
{"u=rx,g=wx,o=wx", "244",},
{"u=rx,g=wx,o=w", "245",},
{"u=rx,g=wx,o=x", "246",},
{"u=rx,g=wx,o=", "247",},
{"u=rx,g=w,o=rwx", "250",},
{"u=rx,g=w,o=rw", "251",},
{"u=rx,g=w,o=rx", "252",},
{"u=rx,g=w,o=r", "253",},
{"u=rx,g=w,o=wx", "254",},
{"u=rx,g=w,o=w", "255",},
{"u=rx,g=w,o=x", "256",},
{"u=rx,g=w,o=", "257",},
{"u=rx,g=x,o=rwx", "260",},
{"u=rx,g=x,o=rw", "261",},
{"u=rx,g=x,o=rx", "262",},
{"u=rx,g=x,o=r", "263",},
{"u=rx,g=x,o=wx", "264",},
{"u=rx,g=x,o=w", "265",},
{"u=rx,g=x,o=x", "266",},
{"u=rx,g=x,o=", "267",},
{"u=rx,g=,o=rwx", "270",},
{"u=rx,g=,o=rw", "271",},
{"u=rx,g=,o=rx", "272",},
{"u=rx,g=,o=r", "273",},
{"u=rx,g=,o=wx", "274",},
{"u=rx,g=,o=w", "275",},
{"u=rx,g=,o=x", "276",},
{"u=rx,g=,o=", "277",},
{"u=r,g=rwx,o=rwx", "300",},
{"u=r,g=rwx,o=rw", "301",},
{"u=r,g=rwx,o=rx", "302",},
{"u=r,g=rwx,o=r", "303",},
{"u=r,g=rwx,o=wx", "304",},
{"u=r,g=rwx,o=w", "305",},
{"u=r,g=rwx,o=x", "306",},
{"u=r,g=rwx,o=", "307",},
{"u=r,g=rw,o=rwx", "310",},
{"u=r,g=rw,o=rw", "311",},
{"u=r,g=rw,o=rx", "312",},
{"u=r,g=rw,o=r", "313",},
{"u=r,g=rw,o=wx", "314",},
{"u=r,g=rw,o=w", "315",},
{"u=r,g=rw,o=x", "316",},
{"u=r,g=rw,o=", "317",},
{"u=r,g=rx,o=rwx", "320",},
{"u=r,g=rx,o=rw", "321",},
{"u=r,g=rx,o=rx", "322",},
{"u=r,g=rx,o=r", "323",},
{"u=r,g=rx,o=wx", "324",},
{"u=r,g=rx,o=w", "325",},
{"u=r,g=rx,o=x", "326",},
{"u=r,g=rx,o=", "327",},
{"u=r,g=r,o=rwx", "330",},
{"u=r,g=r,o=rw", "331",},
{"u=r,g=r,o=rx", "332",},
{"u=r,g=r,o=r", "333",},
{"u=r,g=r,o=wx", "334",},
{"u=r,g=r,o=w", "335",},
{"u=r,g=r,o=x", "336",},
{"u=r,g=r,o=", "337",},
{"u=r,g=wx,o=rwx", "340",},
{"u=r,g=wx,o=rw", "341",},
{"u=r,g=wx,o=rx", "342",},
{"u=r,g=wx,o=r", "343",},
{"u=r,g=wx,o=wx", "344",},
{"u=r,g=wx,o=w", "345",},
{"u=r,g=wx,o=x", "346",},
{"u=r,g=wx,o=", "347",},
{"u=r,g=w,o=rwx", "350",},
{"u=r,g=w,o=rw", "351",},
{"u=r,g=w,o=rx", "352",},
{"u=r,g=w,o=r", "353",},
{"u=r,g=w,o=wx", "354",},
{"u=r,g=w,o=w", "355",},
{"u=r,g=w,o=x", "356",},
{"u=r,g=w,o=", "357",},
{"u=r,g=x,o=rwx", "360",},
{"u=r,g=x,o=rw", "361",},
{"u=r,g=x,o=rx", "362",},
{"u=r,g=x,o=r", "363",},
{"u=r,g=x,o=wx", "364",},
{"u=r,g=x,o=w", "365",},
{"u=r,g=x,o=x", "366",},
{"u=r,g=x,o=", "367",},
{"u=r,g=,o=rwx", "370",},
{"u=r,g=,o=rw", "371",},
{"u=r,g=,o=rx", "372",},
{"u=r,g=,o=r", "373",},
{"u=r,g=,o=wx", "374",},
{"u=r,g=,o=w", "375",},
{"u=r,g=,o=x", "376",},
{"u=r,g=,o=", "377",},
{"u=wx,g=rwx,o=rwx", "400",},
{"u=wx,g=rwx,o=rw", "401",},
{"u=wx,g=rwx,o=rx", "402",},
{"u=wx,g=rwx,o=r", "403",},
{"u=wx,g=rwx,o=wx", "404",},
{"u=wx,g=rwx,o=w", "405",},
{"u=wx,g=rwx,o=x", "406",},
{"u=wx,g=rwx,o=", "407",},
{"u=wx,g=rw,o=rwx", "410",},
{"u=wx,g=rw,o=rw", "411",},
{"u=wx,g=rw,o=rx", "412",},
{"u=wx,g=rw,o=r", "413",},
{"u=wx,g=rw,o=wx", "414",},
{"u=wx,g=rw,o=w", "415",},
{"u=wx,g=rw,o=x", "416",},
{"u=wx,g=rw,o=", "417",},
{"u=wx,g=rx,o=rwx", "420",},
{"u=wx,g=rx,o=rw", "421",},
{"u=wx,g=rx,o=rx", "422",},
{"u=wx,g=rx,o=r", "423",},
{"u=wx,g=rx,o=wx", "424",},
{"u=wx,g=rx,o=w", "425",},
{"u=wx,g=rx,o=x", "426",},
{"u=wx,g=rx,o=", "427",},
{"u=wx,g=r,o=rwx", "430",},
{"u=wx,g=r,o=rw", "431",},
{"u=wx,g=r,o=rx", "432",},
{"u=wx,g=r,o=r", "433",},
{"u=wx,g=r,o=wx", "434",},
{"u=wx,g=r,o=w", "435",},
{"u=wx,g=r,o=x", "436",},
{"u=wx,g=r,o=", "437",},
{"u=wx,g=wx,o=rwx", "440",},
{"u=wx,g=wx,o=rw", "441",},
{"u=wx,g=wx,o=rx", "442",},
{"u=wx,g=wx,o=r", "443",},
{"u=wx,g=wx,o=wx", "444",},
{"u=wx,g=wx,o=w", "445",},
{"u=wx,g=wx,o=x", "446",},
{"u=wx,g=wx,o=", "447",},
{"u=wx,g=w,o=rwx", "450",},
{"u=wx,g=w,o=rw", "451",},
{"u=wx,g=w,o=rx", "452",},
{"u=wx,g=w,o=r", "453",},
{"u=wx,g=w,o=wx", "454",},
{"u=wx,g=w,o=w", "455",},
{"u=wx,g=w,o=x", "456",},
{"u=wx,g=w,o=", "457",},
{"u=wx,g=x,o=rwx", "460",},
{"u=wx,g=x,o=rw", "461",},
{"u=wx,g=x,o=rx", "462",},
{"u=wx,g=x,o=r", "463",},
{"u=wx,g=x,o=wx", "464",},
{"u=wx,g=x,o=w", "465",},
{"u=wx,g=x,o=x", "466",},
{"u=wx,g=x,o=", "467",},
{"u=wx,g=,o=rwx", "470",},
{"u=wx,g=,o=rw", "471",},
{"u=wx,g=,o=rx", "472",},
{"u=wx,g=,o=r", "473",},
{"u=wx,g=,o=wx", "474",},
{"u=wx,g=,o=w", "475",},
{"u=wx,g=,o=x", "476",},
{"u=wx,g=,o=", "477",},
{"u=w,g=rwx,o=rwx", "500",},
{"u=w,g=rwx,o=rw", "501",},
{"u=w,g=rwx,o=rx", "502",},
{"u=w,g=rwx,o=r", "503",},
{"u=w,g=rwx,o=wx", "504",},
{"u=w,g=rwx,o=w", "505",},
{"u=w,g=rwx,o=x", "506",},
{"u=w,g=rwx,o=", "507",},
{"u=w,g=rw,o=rwx", "510",},
{"u=w,g=rw,o=rw", "511",},
{"u=w,g=rw,o=rx", "512",},
{"u=w,g=rw,o=r", "513",},
{"u=w,g=rw,o=wx", "514",},
{"u=w,g=rw,o=w", "515",},
{"u=w,g=rw,o=x", "516",},
{"u=w,g=rw,o=", "517",},
{"u=w,g=rx,o=rwx", "520",},
{"u=w,g=rx,o=rw", "521",},
{"u=w,g=rx,o=rx", "522",},
{"u=w,g=rx,o=r", "523",},
{"u=w,g=rx,o=wx", "524",},
{"u=w,g=rx,o=w", "525",},
{"u=w,g=rx,o=x", "526",},
{"u=w,g=rx,o=", "527",},
{"u=w,g=r,o=rwx", "530",},
{"u=w,g=r,o=rw", "531",},
{"u=w,g=r,o=rx", "532",},
{"u=w,g=r,o=r", "533",},
{"u=w,g=r,o=wx", "534",},
{"u=w,g=r,o=w", "535",},
{"u=w,g=r,o=x", "536",},
{"u=w,g=r,o=", "537",},
{"u=w,g=wx,o=rwx", "540",},
{"u=w,g=wx,o=rw", "541",},
{"u=w,g=wx,o=rx", "542",},
{"u=w,g=wx,o=r", "543",},
{"u=w,g=wx,o=wx", "544",},
{"u=w,g=wx,o=w", "545",},
{"u=w,g=wx,o=x", "546",},
{"u=w,g=wx,o=", "547",},
{"u=w,g=w,o=rwx", "550",},
{"u=w,g=w,o=rw", "551",},
{"u=w,g=w,o=rx", "552",},
{"u=w,g=w,o=r", "553",},
{"u=w,g=w,o=wx", "554",},
{"u=w,g=w,o=w", "555",},
{"u=w,g=w,o=x", "556",},
{"u=w,g=w,o=", "557",},
{"u=w,g=x,o=rwx", "560",},
{"u=w,g=x,o=rw", "561",},
{"u=w,g=x,o=rx", "562",},
{"u=w,g=x,o=r", "563",},
{"u=w,g=x,o=wx", "564",},
{"u=w,g=x,o=w", "565",},
{"u=w,g=x,o=x", "566",},
{"u=w,g=x,o=", "567",},
{"u=w,g=,o=rwx", "570",},
{"u=w,g=,o=rw", "571",},
{"u=w,g=,o=rx", "572",},
{"u=w,g=,o=r", "573",},
{"u=w,g=,o=wx", "574",},
{"u=w,g=,o=w", "575",},
{"u=w,g=,o=x", "576",},
{"u=w,g=,o=", "577",},
{"u=x,g=rwx,o=rwx", "600",},
{"u=x,g=rwx,o=rw", "601",},
{"u=x,g=rwx,o=rx", "602",},
{"u=x,g=rwx,o=r", "603",},
{"u=x,g=rwx,o=wx", "604",},
{"u=x,g=rwx,o=w", "605",},
{"u=x,g=rwx,o=x", "606",},
{"u=x,g=rwx,o=", "607",},
{"u=x,g=rw,o=rwx", "610",},
{"u=x,g=rw,o=rw", "611",},
{"u=x,g=rw,o=rx", "612",},
{"u=x,g=rw,o=r", "613",},
{"u=x,g=rw,o=wx", "614",},
{"u=x,g=rw,o=w", "615",},
{"u=x,g=rw,o=x", "616",},
{"u=x,g=rw,o=", "617",},
{"u=x,g=rx,o=rwx", "620",},
{"u=x,g=rx,o=rw", "621",},
{"u=x,g=rx,o=rx", "622",},
{"u=x,g=rx,o=r", "623",},
{"u=x,g=rx,o=wx", "624",},
{"u=x,g=rx,o=w", "625",},
{"u=x,g=rx,o=x", "626",},
{"u=x,g=rx,o=", "627",},
{"u=x,g=r,o=rwx", "630",},
{"u=x,g=r,o=rw", "631",},
{"u=x,g=r,o=rx", "632",},
{"u=x,g=r,o=r", "633",},
{"u=x,g=r,o=wx", "634",},
{"u=x,g=r,o=w", "635",},
{"u=x,g=r,o=x", "636",},
{"u=x,g=r,o=", "637",},
{"u=x,g=wx,o=rwx", "640",},
{"u=x,g=wx,o=rw", "641",},
{"u=x,g=wx,o=rx", "642",},
{"u=x,g=wx,o=r", "643",},
{"u=x,g=wx,o=wx", "644",},
{"u=x,g=wx,o=w", "645",},
{"u=x,g=wx,o=x", "646",},
{"u=x,g=wx,o=", "647",},
{"u=x,g=w,o=rwx", "650",},
{"u=x,g=w,o=rw", "651",},
{"u=x,g=w,o=rx", "652",},
{"u=x,g=w,o=r", "653",},
{"u=x,g=w,o=wx", "654",},
{"u=x,g=w,o=w", "655",},
{"u=x,g=w,o=x", "656",},
{"u=x,g=w,o=", "657",},
{"u=x,g=x,o=rwx", "660",},
{"u=x,g=x,o=rw", "661",},
{"u=x,g=x,o=rx", "662",},
{"u=x,g=x,o=r", "663",},
{"u=x,g=x,o=wx", "664",},
{"u=x,g=x,o=w", "665",},
{"u=x,g=x,o=x", "666",},
{"u=x,g=x,o=", "667",},
{"u=x,g=,o=rwx", "670",},
{"u=x,g=,o=rw", "671",},
{"u=x,g=,o=rx", "672",},
{"u=x,g=,o=r", "673",},
{"u=x,g=,o=wx", "674",},
{"u=x,g=,o=w", "675",},
{"u=x,g=,o=x", "676",},
{"u=x,g=,o=", "677",},
{"u=,g=rwx,o=rwx", "700",},
{"u=,g=rwx,o=rw", "701",},
{"u=,g=rwx,o=rx", "702",},
{"u=,g=rwx,o=r", "703",},
{"u=,g=rwx,o=wx", "704",},
{"u=,g=rwx,o=w", "705",},
{"u=,g=rwx,o=x", "706",},
{"u=,g=rwx,o=", "707",},
{"u=,g=rw,o=rwx", "710",},
{"u=,g=rw,o=rw", "711",},
{"u=,g=rw,o=rx", "712",},
{"u=,g=rw,o=r", "713",},
{"u=,g=rw,o=wx", "714",},
{"u=,g=rw,o=w", "715",},
{"u=,g=rw,o=x", "716",},
{"u=,g=rw,o=", "717",},
{"u=,g=rx,o=rwx", "720",},
{"u=,g=rx,o=rw", "721",},
{"u=,g=rx,o=rx", "722",},
{"u=,g=rx,o=r", "723",},
{"u=,g=rx,o=wx", "724",},
{"u=,g=rx,o=w", "725",},
{"u=,g=rx,o=x", "726",},
{"u=,g=rx,o=", "727",},
{"u=,g=r,o=rwx", "730",},
{"u=,g=r,o=rw", "731",},
{"u=,g=r,o=rx", "732",},
{"u=,g=r,o=r", "733",},
{"u=,g=r,o=wx", "734",},
{"u=,g=r,o=w", "735",},
{"u=,g=r,o=x", "736",},
{"u=,g=r,o=", "737",},
{"u=,g=wx,o=rwx", "740",},
{"u=,g=wx,o=rw", "741",},
{"u=,g=wx,o=rx", "742",},
{"u=,g=wx,o=r", "743",},
{"u=,g=wx,o=wx", "744",},
{"u=,g=wx,o=w", "745",},
{"u=,g=wx,o=x", "746",},
{"u=,g=wx,o=", "747",},
{"u=,g=w,o=rwx", "750",},
{"u=,g=w,o=rw", "751",},
{"u=,g=w,o=rx", "752",},
{"u=,g=w,o=r", "753",},
{"u=,g=w,o=wx", "754",},
{"u=,g=w,o=w", "755",},
{"u=,g=w,o=x", "756",},
{"u=,g=w,o=", "757",},
{"u=,g=x,o=rwx", "760",},
{"u=,g=x,o=rw", "761",},
{"u=,g=x,o=rx", "762",},
{"u=,g=x,o=r", "763",},
{"u=,g=x,o=wx", "764",},
{"u=,g=x,o=w", "765",},
{"u=,g=x,o=x", "766",},
{"u=,g=x,o=", "767",},
{"u=,g=,o=rwx", "770",},
{"u=,g=,o=rw", "771",},
{"u=,g=,o=rx", "772",},
{"u=,g=,o=r", "773",},
{"u=,g=,o=wx", "774",},
{"u=,g=,o=w", "775",},
{"u=,g=,o=x", "776",},
{"u=,g=,o=", "777"}
};
for(int i = 0; i < symbolic.length; i++) { // check every symbolic/octal pair
conf.set(FsPermission.UMASK_LABEL, symbolic[i][0]);
short val = Short.valueOf(symbolic[i][1], 8);
assertEquals(val, FsPermission.getUMask(conf).toShort());
}
}
public void testBadUmasks() {
Configuration conf = new Configuration();
for(String b : new String [] {"1777", "22", "99", "foo", ""}) {
conf.set(FsPermission.UMASK_LABEL, b);
try {
FsPermission.getUMask(conf);
fail("Shouldn't have been able to parse bad umask");
} catch(IllegalArgumentException iae) {
assertTrue("Exception should specify parsing error and invalid umask: "
+ iae.getMessage(), isCorrectExceptionMessage(iae.getMessage(), b));
}
}
}
private boolean isCorrectExceptionMessage(String msg, String umask) {
return msg.contains("Unable to parse") &&
msg.contains(umask) &&
msg.contains("octal or symbolic");
}
// Ensure that when the deprecated decimal umask key is used, it is correctly
// parsed as such and converted correctly to an FsPermission value
public void testDeprecatedUmask() {
Configuration conf = new Configuration();
conf.set(FsPermission.DEPRECATED_UMASK_LABEL, "302"); // 302 = 0456
FsPermission umask = FsPermission.getUMask(conf);
assertEquals(0456, umask.toShort());
}
}
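A minimal sketch of the symbolic/short round-trip the suite above exercises, assuming only org.apache.hadoop.fs.permission.FsPermission on the classpath:

import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionRoundTripDemo {
  public static void main(String[] args) {
    // 10-character symbolic form, as built in testFsPermission: 0755 + sticky
    FsPermission p = FsPermission.valueOf("-rwxr-xr-t");
    System.out.println(Integer.toOctalString(p.toShort())); // prints 1755
    // And back: the sticky bit shows up as 't' in the 9-character string form.
    System.out.println(new FsPermission((short) 01755)); // prints rwxr-xr-t
  }
}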
[file_length: 24,274 | avg_line_length: 33.238364 | max_line_length: 80 | extension_type: java]

---

repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestAcl.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.permission;
import static org.junit.Assert.*;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Tests covering basic functionality of the ACL objects.
*/
public class TestAcl {
private static AclEntry ENTRY1, ENTRY2, ENTRY3, ENTRY4, ENTRY5, ENTRY6,
ENTRY7, ENTRY8, ENTRY9, ENTRY10, ENTRY11, ENTRY12, ENTRY13;
private static AclStatus STATUS1, STATUS2, STATUS3, STATUS4;
@BeforeClass
public static void setUp() {
// named user
AclEntry.Builder aclEntryBuilder = new AclEntry.Builder()
.setType(AclEntryType.USER)
.setName("user1")
.setPermission(FsAction.ALL);
ENTRY1 = aclEntryBuilder.build();
ENTRY2 = aclEntryBuilder.build();
// named group
ENTRY3 = new AclEntry.Builder()
.setType(AclEntryType.GROUP)
.setName("group2")
.setPermission(FsAction.READ_WRITE)
.build();
// default other
ENTRY4 = new AclEntry.Builder()
.setType(AclEntryType.OTHER)
.setPermission(FsAction.NONE)
.setScope(AclEntryScope.DEFAULT)
.build();
// owner
ENTRY5 = new AclEntry.Builder()
.setType(AclEntryType.USER)
.setPermission(FsAction.ALL)
.build();
// default named group
ENTRY6 = new AclEntry.Builder()
.setType(AclEntryType.GROUP)
.setName("group3")
.setPermission(FsAction.READ_WRITE)
.setScope(AclEntryScope.DEFAULT)
.build();
// other
ENTRY7 = new AclEntry.Builder()
.setType(AclEntryType.OTHER)
.setPermission(FsAction.NONE)
.build();
// default named user
ENTRY8 = new AclEntry.Builder()
.setType(AclEntryType.USER)
.setName("user3")
.setPermission(FsAction.ALL)
.setScope(AclEntryScope.DEFAULT)
.build();
// mask
ENTRY9 = new AclEntry.Builder()
.setType(AclEntryType.MASK)
.setPermission(FsAction.READ)
.build();
// default mask
ENTRY10 = new AclEntry.Builder()
.setType(AclEntryType.MASK)
.setPermission(FsAction.READ_EXECUTE)
.setScope(AclEntryScope.DEFAULT)
.build();
// group
ENTRY11 = new AclEntry.Builder()
.setType(AclEntryType.GROUP)
.setPermission(FsAction.READ)
.build();
// default group
ENTRY12 = new AclEntry.Builder()
.setType(AclEntryType.GROUP)
.setPermission(FsAction.READ)
.setScope(AclEntryScope.DEFAULT)
.build();
// default owner
ENTRY13 = new AclEntry.Builder()
.setType(AclEntryType.USER)
.setPermission(FsAction.ALL)
.setScope(AclEntryScope.DEFAULT)
.build();
AclStatus.Builder aclStatusBuilder = new AclStatus.Builder()
.owner("owner1")
.group("group1")
.addEntry(ENTRY1)
.addEntry(ENTRY3)
.addEntry(ENTRY4);
STATUS1 = aclStatusBuilder.build();
STATUS2 = aclStatusBuilder.build();
STATUS3 = new AclStatus.Builder()
.owner("owner2")
.group("group2")
.stickyBit(true)
.build();
STATUS4 = new AclStatus.Builder()
.addEntry(ENTRY1)
.addEntry(ENTRY3)
.addEntry(ENTRY4)
.addEntry(ENTRY5)
.addEntry(ENTRY6)
.addEntry(ENTRY7)
.addEntry(ENTRY8)
.addEntry(ENTRY9)
.addEntry(ENTRY10)
.addEntry(ENTRY11)
.addEntry(ENTRY12)
.addEntry(ENTRY13)
.build();
}
@Test
public void testEntryEquals() {
assertNotSame(ENTRY1, ENTRY2);
assertNotSame(ENTRY1, ENTRY3);
assertNotSame(ENTRY1, ENTRY4);
assertNotSame(ENTRY2, ENTRY3);
assertNotSame(ENTRY2, ENTRY4);
assertNotSame(ENTRY3, ENTRY4);
assertEquals(ENTRY1, ENTRY1);
assertEquals(ENTRY2, ENTRY2);
assertEquals(ENTRY1, ENTRY2);
assertEquals(ENTRY2, ENTRY1);
assertFalse(ENTRY1.equals(ENTRY3));
assertFalse(ENTRY1.equals(ENTRY4));
assertFalse(ENTRY3.equals(ENTRY4));
assertFalse(ENTRY1.equals(null));
assertFalse(ENTRY1.equals(new Object()));
}
@Test
public void testEntryHashCode() {
assertEquals(ENTRY1.hashCode(), ENTRY2.hashCode());
assertFalse(ENTRY1.hashCode() == ENTRY3.hashCode());
assertFalse(ENTRY1.hashCode() == ENTRY4.hashCode());
assertFalse(ENTRY3.hashCode() == ENTRY4.hashCode());
}
@Test
public void testEntryScopeIsAccessIfUnspecified() {
assertEquals(AclEntryScope.ACCESS, ENTRY1.getScope());
assertEquals(AclEntryScope.ACCESS, ENTRY2.getScope());
assertEquals(AclEntryScope.ACCESS, ENTRY3.getScope());
assertEquals(AclEntryScope.DEFAULT, ENTRY4.getScope());
}
@Test
public void testStatusEquals() {
assertNotSame(STATUS1, STATUS2);
assertNotSame(STATUS1, STATUS3);
assertNotSame(STATUS2, STATUS3);
assertEquals(STATUS1, STATUS1);
assertEquals(STATUS2, STATUS2);
assertEquals(STATUS1, STATUS2);
assertEquals(STATUS2, STATUS1);
assertFalse(STATUS1.equals(STATUS3));
assertFalse(STATUS2.equals(STATUS3));
assertFalse(STATUS1.equals(null));
assertFalse(STATUS1.equals(new Object()));
}
@Test
public void testStatusHashCode() {
assertEquals(STATUS1.hashCode(), STATUS2.hashCode());
assertFalse(STATUS1.hashCode() == STATUS3.hashCode());
}
@Test
public void testToString() {
assertEquals("user:user1:rwx", ENTRY1.toString());
assertEquals("user:user1:rwx", ENTRY2.toString());
assertEquals("group:group2:rw-", ENTRY3.toString());
assertEquals("default:other::---", ENTRY4.toString());
assertEquals(
"owner: owner1, group: group1, acl: {entries: [user:user1:rwx, group:group2:rw-, default:other::---], stickyBit: false}",
STATUS1.toString());
assertEquals(
"owner: owner1, group: group1, acl: {entries: [user:user1:rwx, group:group2:rw-, default:other::---], stickyBit: false}",
STATUS2.toString());
assertEquals(
"owner: owner2, group: group2, acl: {entries: [], stickyBit: true}",
STATUS3.toString());
}
}
[file_length: 6,703 | avg_line_length: 30.772512 | max_line_length: 127 | extension_type: java]

---

repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.sftp;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Shell;
import org.apache.sshd.SshServer;
import org.apache.sshd.common.NamedFactory;
import org.apache.sshd.server.Command;
import org.apache.sshd.server.PasswordAuthenticator;
import org.apache.sshd.server.UserAuth;
import org.apache.sshd.server.auth.UserAuthPassword;
import org.apache.sshd.server.keyprovider.SimpleGeneratorHostKeyProvider;
import org.apache.sshd.server.session.ServerSession;
import org.apache.sshd.server.sftp.SftpSubsystem;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import static org.junit.Assert.*;
import static org.junit.Assume.assumeTrue;
public class TestSFTPFileSystem {
private static final String TEST_SFTP_DIR = "testsftp";
private static final String TEST_ROOT_DIR =
System.getProperty("test.build.data", "build/test/data");
@Rule public TestName name = new TestName();
private static final String connection = "sftp://user:password@localhost";
private static Path localDir = null;
private static FileSystem localFs = null;
private static FileSystem sftpFs = null;
private static SshServer sshd = null;
private static int port;
private static void startSshdServer() throws IOException {
sshd = SshServer.setUpDefaultServer();
// ask OS to assign a port
sshd.setPort(0);
sshd.setKeyPairProvider(new SimpleGeneratorHostKeyProvider());
List<NamedFactory<UserAuth>> userAuthFactories =
new ArrayList<NamedFactory<UserAuth>>();
userAuthFactories.add(new UserAuthPassword.Factory());
sshd.setUserAuthFactories(userAuthFactories);
sshd.setPasswordAuthenticator(new PasswordAuthenticator() {
@Override
public boolean authenticate(String username, String password,
ServerSession session) {
return username.equals("user") && password.equals("password");
}
});
sshd.setSubsystemFactories(
Arrays.<NamedFactory<Command>>asList(new SftpSubsystem.Factory()));
sshd.start();
port = sshd.getPort();
}
@BeforeClass
public static void setUp() throws Exception {
// skip all tests if running on Windows
assumeTrue(!Shell.WINDOWS);
startSshdServer();
Configuration conf = new Configuration();
conf.setClass("fs.sftp.impl", SFTPFileSystem.class, FileSystem.class);
conf.setInt("fs.sftp.host.port", port);
conf.setBoolean("fs.sftp.impl.disable.cache", true);
localFs = FileSystem.getLocal(conf);
localDir = localFs.makeQualified(new Path(TEST_ROOT_DIR, TEST_SFTP_DIR));
if (localFs.exists(localDir)) {
localFs.delete(localDir, true);
}
localFs.mkdirs(localDir);
sftpFs = FileSystem.get(URI.create(connection), conf);
}
@AfterClass
public static void tearDown() {
if (localFs != null) {
try {
localFs.delete(localDir, true);
localFs.close();
} catch (IOException e) {
// ignore
}
}
if (sftpFs != null) {
try {
sftpFs.close();
} catch (IOException e) {
// ignore
}
}
if (sshd != null) {
try {
sshd.stop(true);
} catch (InterruptedException e) {
// ignore
}
}
}
private static final Path touch(FileSystem fs, String filename)
throws IOException {
return touch(fs, filename, null);
}
private static final Path touch(FileSystem fs, String filename, byte[] data)
throws IOException {
Path lPath = new Path(localDir.toUri().getPath(), filename);
FSDataOutputStream out = null;
try {
out = fs.create(lPath);
if (data != null) {
out.write(data);
}
} finally {
if (out != null) {
out.close();
}
}
return lPath;
}
/**
* Creates a file and deletes it.
*
* @throws Exception
*/
@Test
public void testCreateFile() throws Exception {
Path file = touch(sftpFs, name.getMethodName().toLowerCase());
assertTrue(localFs.exists(file));
assertTrue(sftpFs.delete(file, false));
assertFalse(localFs.exists(file));
}
/**
* Checks if a new created file exists.
*
* @throws Exception
*/
@Test
public void testFileExists() throws Exception {
Path file = touch(localFs, name.getMethodName().toLowerCase());
assertTrue(sftpFs.exists(file));
assertTrue(localFs.exists(file));
assertTrue(sftpFs.delete(file, false));
assertFalse(sftpFs.exists(file));
assertFalse(localFs.exists(file));
}
/**
* Test writing to a file and reading its value.
*
* @throws Exception
*/
@Test
public void testReadFile() throws Exception {
byte[] data = "yaks".getBytes();
Path file = touch(localFs, name.getMethodName().toLowerCase(), data);
FSDataInputStream is = null;
try {
is = sftpFs.open(file);
byte[] b = new byte[data.length];
is.read(b);
assertArrayEquals(data, b);
} finally {
if (is != null) {
is.close();
}
}
assertTrue(sftpFs.delete(file, false));
}
/**
* Test getting the status of a file.
*
* @throws Exception
*/
@Test
public void testStatFile() throws Exception {
byte[] data = "yaks".getBytes();
Path file = touch(localFs, name.getMethodName().toLowerCase(), data);
FileStatus lstat = localFs.getFileStatus(file);
FileStatus sstat = sftpFs.getFileStatus(file);
assertNotNull(sstat);
assertEquals(lstat.getPath().toUri().getPath(),
sstat.getPath().toUri().getPath());
assertEquals(data.length, sstat.getLen());
assertEquals(lstat.getLen(), sstat.getLen());
assertTrue(sftpFs.delete(file, false));
}
/**
* Test deleting a non empty directory.
*
* @throws Exception
*/
@Test(expected=java.io.IOException.class)
public void testDeleteNonEmptyDir() throws Exception {
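// Create a file so that localDir is non-empty and the delete below must fail.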
touch(localFs, name.getMethodName().toLowerCase());
sftpFs.delete(localDir, false);
}
/**
* Test deleting a file that does not exist.
*
* @throws Exception
*/
@Test
public void testDeleteNonExistFile() throws Exception {
Path file = new Path(localDir, name.getMethodName().toLowerCase());
assertFalse(sftpFs.delete(file, false));
}
/**
* Test renaming a file.
*
* @throws Exception
*/
@Test
public void testRenameFile() throws Exception {
byte[] data = "dingos".getBytes();
Path file1 = touch(localFs, name.getMethodName().toLowerCase() + "1");
Path file2 = new Path(localDir, name.getMethodName().toLowerCase() + "2");
assertTrue(sftpFs.rename(file1, file2));
assertTrue(sftpFs.exists(file2));
assertFalse(sftpFs.exists(file1));
assertTrue(localFs.exists(file2));
assertFalse(localFs.exists(file1));
assertTrue(sftpFs.delete(file2, false));
}
/**
* Test renaming a file that does not exist.
*
* @throws Exception
*/
@Test(expected=java.io.IOException.class)
public void testRenameNonExistFile() throws Exception {
Path file1 = new Path(localDir, name.getMethodName().toLowerCase() + "1");
Path file2 = new Path(localDir, name.getMethodName().toLowerCase() + "2");
sftpFs.rename(file1, file2);
}
/**
* Test renaming a file onto an existing file.
*
* @throws Exception
*/
@Test(expected=java.io.IOException.class)
public void testRenamingFileOntoExistingFile() throws Exception {
Path file1 = touch(localFs, name.getMethodName().toLowerCase() + "1");
Path file2 = touch(localFs, name.getMethodName().toLowerCase() + "2");
sftpFs.rename(file1, file2);
}
}
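Outside the embedded test server, wiring up an SFTP-backed FileSystem follows the same recipe as setUp() above. A minimal standalone sketch; the host and credentials are placeholders, not values from the test:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.sftp.SFTPFileSystem;

public class SftpListDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Same binding the test uses: route the sftp:// scheme to SFTPFileSystem.
    conf.setClass("fs.sftp.impl", SFTPFileSystem.class, FileSystem.class);
    try (FileSystem fs = FileSystem.get(
        URI.create("sftp://user:password@example.com:22"), conf)) {
      for (FileStatus st : fs.listStatus(new Path("/"))) {
        System.out.println(st.getPath() + " " + st.getLen());
      }
    }
  }
}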
[file_length: 8,902 | avg_line_length: 27.812298 | max_line_length: 78 | extension_type: java]

---

repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.junit.After;
import org.junit.Before;
/**
*
* Run ViewFileSystemBaseTest using a viewfs with an authority:
* viewfs://mountTableName/
* i.e. the authority is used to load a mount table.
* The authority name used is "default".
*
*/
public class TestViewFileSystemLocalFileSystem extends ViewFileSystemBaseTest {
@Override
@Before
public void setUp() throws Exception {
// create the test root on local_fs
fsTarget = FileSystem.getLocal(new Configuration());
super.setUp();
}
@Override
@After
public void tearDown() throws Exception {
fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
super.tearDown();
}
}
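The same subclassing pattern extends to any other target file system (see the base-class Javadoc in the next file). A hedged sketch, assuming the imports of the class above plus java.net.URI; the myfs:/// URI is a hypothetical placeholder:

public class TestViewFileSystemMyFs extends ViewFileSystemBaseTest {
  @Override
  @Before
  public void setUp() throws Exception {
    // Point the base test's mount targets at the hypothetical file system.
    fsTarget = FileSystem.get(URI.create("myfs:///"), new Configuration());
    super.setUp();
  }
}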
[file_length: 1,683 | avg_line_length: 28.54386 | max_line_length: 79 | extension_type: java]

---

repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import static org.apache.hadoop.fs.FileSystemTestHelper.*;
import org.apache.hadoop.fs.permission.AclEntry;
import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.ConfigUtil;
import org.apache.hadoop.fs.viewfs.ViewFileSystem;
import org.apache.hadoop.fs.viewfs.ViewFileSystem.MountPoint;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
* <p>
* A collection of tests for the {@link ViewFileSystem}.
* This test should be used for testing a ViewFileSystem that has mount links
* to a target file system such as localFs or HDFS.
* </p>
* <p>
* To test a given target file system, create a subclass of this
* test and override {@link #setUp()} to initialize <code>fsTarget</code>
* to point to the file system to which you want the mount targets to point.
*
* Since this is a JUnit 4 test, you can also do a single setup before
* the start of all tests.
* E.g.
* @BeforeClass public static void clusterSetupAtBegining()
* @AfterClass public static void ClusterShutdownAtEnd()
* </p>
*/
public class ViewFileSystemBaseTest {
FileSystem fsView; // the view file system - the mounts are here
FileSystem fsTarget; // the target file system - the mount will point here
Path targetTestRoot;
Configuration conf;
final FileSystemTestHelper fileSystemTestHelper;
public ViewFileSystemBaseTest() {
this.fileSystemTestHelper = createFileSystemHelper();
}
protected FileSystemTestHelper createFileSystemHelper() {
return new FileSystemTestHelper();
}
@Before
public void setUp() throws Exception {
initializeTargetTestRoot();
// Make user and data dirs - we create links to them in the mount table
fsTarget.mkdirs(new Path(targetTestRoot,"user"));
fsTarget.mkdirs(new Path(targetTestRoot,"data"));
fsTarget.mkdirs(new Path(targetTestRoot,"dir2"));
fsTarget.mkdirs(new Path(targetTestRoot,"dir3"));
FileSystemTestHelper.createFile(fsTarget, new Path(targetTestRoot,"aFile"));
// Now we use the mount fs to set links to user and dir
// in the test root
// Set up the defaultMT in the config with our mount point links
conf = ViewFileSystemTestSetup.createConfig();
setupMountPoints();
fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
}
@After
public void tearDown() throws Exception {
fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
}
void initializeTargetTestRoot() throws IOException {
targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
// In case previous test was killed before cleanup
fsTarget.delete(targetTestRoot, true);
fsTarget.mkdirs(targetTestRoot);
}
void setupMountPoints() {
ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
ConfigUtil.addLink(conf, "/user", new Path(targetTestRoot, "user").toUri());
ConfigUtil.addLink(conf, "/user2", new Path(targetTestRoot,"user").toUri());
ConfigUtil.addLink(conf, "/data", new Path(targetTestRoot,"data").toUri());
ConfigUtil.addLink(conf, "/internalDir/linkToDir2",
new Path(targetTestRoot,"dir2").toUri());
ConfigUtil.addLink(conf, "/internalDir/internalDir2/linkToDir3",
new Path(targetTestRoot,"dir3").toUri());
ConfigUtil.addLink(conf, "/danglingLink",
new Path(targetTestRoot, "missingTarget").toUri());
ConfigUtil.addLink(conf, "/linkToAFile",
new Path(targetTestRoot, "aFile").toUri());
}
@Test
public void testGetMountPoints() {
ViewFileSystem viewfs = (ViewFileSystem) fsView;
MountPoint[] mountPoints = viewfs.getMountPoints();
Assert.assertEquals(getExpectedMountPoints(), mountPoints.length);
}
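  // The expected count equals the number of ConfigUtil.addLink() calls
  // made in setupMountPoints() above.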
int getExpectedMountPoints() {
return 8;
}
/**
   * This default implementation is for when viewfs has mount points
   * into file systems, such as LocalFs, that do not have delegation tokens.
   * It should be overridden when the mount points are into HDFS.
*/
@Test
public void testGetDelegationTokens() throws IOException {
Token<?>[] delTokens =
fsView.addDelegationTokens("sanjay", new Credentials());
Assert.assertEquals(getExpectedDelegationTokenCount(), delTokens.length);
}
int getExpectedDelegationTokenCount() {
return 0;
}
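  // As a hedged illustration only (hypothetical subclass, not part of this
  // file): a subclass whose mount points target HDFS would typically
  // override the expected count, e.g.
  //   @Override
  //   int getExpectedDelegationTokenCount() { return 1; }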
@Test
public void testGetDelegationTokensWithCredentials() throws IOException {
Credentials credentials = new Credentials();
List<Token<?>> delTokens =
Arrays.asList(fsView.addDelegationTokens("sanjay", credentials));
int expectedTokenCount = getExpectedDelegationTokenCountWithCredentials();
Assert.assertEquals(expectedTokenCount, delTokens.size());
Credentials newCredentials = new Credentials();
for (int i = 0; i < expectedTokenCount / 2; i++) {
Token<?> token = delTokens.get(i);
newCredentials.addToken(token.getService(), token);
}
List<Token<?>> delTokens2 =
Arrays.asList(fsView.addDelegationTokens("sanjay", newCredentials));
Assert.assertEquals((expectedTokenCount + 1) / 2, delTokens2.size());
}
int getExpectedDelegationTokenCountWithCredentials() {
return 0;
}
@Test
public void testBasicPaths() {
Assert.assertEquals(FsConstants.VIEWFS_URI,
fsView.getUri());
Assert.assertEquals(fsView.makeQualified(
new Path("/user/" + System.getProperty("user.name"))),
fsView.getWorkingDirectory());
Assert.assertEquals(fsView.makeQualified(
new Path("/user/" + System.getProperty("user.name"))),
fsView.getHomeDirectory());
Assert.assertEquals(
new Path("/foo/bar").makeQualified(FsConstants.VIEWFS_URI, null),
fsView.makeQualified(new Path("/foo/bar")));
}
@Test
public void testLocatedOperationsThroughMountLinks() throws IOException {
testOperationsThroughMountLinksInternal(true);
}
@Test
public void testOperationsThroughMountLinks() throws IOException {
testOperationsThroughMountLinksInternal(false);
}
/**
* Test modify operations (create, mkdir, delete, etc)
* on the mount file system where the pathname references through
   * the mount points. Hence these operations will modify the target
   * file system.
   *
   * Verify the operation via the mount file system (i.e. fsView) and *also*
   * via the target file system (i.e. fsTarget) that the mount link points to.
*/
private void testOperationsThroughMountLinksInternal(boolean located)
throws IOException {
// Create file
fileSystemTestHelper.createFile(fsView, "/user/foo");
Assert.assertTrue("Created file should be type file",
fsView.isFile(new Path("/user/foo")));
Assert.assertTrue("Target of created file should be type file",
fsTarget.isFile(new Path(targetTestRoot,"user/foo")));
// Delete the created file
Assert.assertTrue("Delete should suceed",
fsView.delete(new Path("/user/foo"), false));
Assert.assertFalse("File should not exist after delete",
fsView.exists(new Path("/user/foo")));
Assert.assertFalse("Target File should not exist after delete",
fsTarget.exists(new Path(targetTestRoot,"user/foo")));
// Create file with a 2 component dirs
fileSystemTestHelper.createFile(fsView, "/internalDir/linkToDir2/foo");
Assert.assertTrue("Created file should be type file",
fsView.isFile(new Path("/internalDir/linkToDir2/foo")));
Assert.assertTrue("Target of created file should be type file",
fsTarget.isFile(new Path(targetTestRoot,"dir2/foo")));
// Delete the created file
Assert.assertTrue("Delete should suceed",
fsView.delete(new Path("/internalDir/linkToDir2/foo"), false));
Assert.assertFalse("File should not exist after delete",
fsView.exists(new Path("/internalDir/linkToDir2/foo")));
Assert.assertFalse("Target File should not exist after delete",
fsTarget.exists(new Path(targetTestRoot,"dir2/foo")));
// Create file with a 3 component dirs
fileSystemTestHelper.createFile(fsView, "/internalDir/internalDir2/linkToDir3/foo");
Assert.assertTrue("Created file should be type file",
fsView.isFile(new Path("/internalDir/internalDir2/linkToDir3/foo")));
Assert.assertTrue("Target of created file should be type file",
fsTarget.isFile(new Path(targetTestRoot,"dir3/foo")));
// Recursive Create file with missing dirs
fileSystemTestHelper.createFile(fsView,
"/internalDir/linkToDir2/missingDir/miss2/foo");
Assert.assertTrue("Created file should be type file",
fsView.isFile(new Path("/internalDir/linkToDir2/missingDir/miss2/foo")));
Assert.assertTrue("Target of created file should be type file",
fsTarget.isFile(new Path(targetTestRoot,"dir2/missingDir/miss2/foo")));
// Delete the created file
Assert.assertTrue("Delete should succeed",
fsView.delete(
new Path("/internalDir/internalDir2/linkToDir3/foo"), false));
Assert.assertFalse("File should not exist after delete",
fsView.exists(new Path("/internalDir/internalDir2/linkToDir3/foo")));
Assert.assertFalse("Target File should not exist after delete",
fsTarget.exists(new Path(targetTestRoot,"dir3/foo")));
// mkdir
fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView, "/user/dirX"));
Assert.assertTrue("New dir should be type dir",
fsView.isDirectory(new Path("/user/dirX")));
Assert.assertTrue("Target of new dir should be of type dir",
fsTarget.isDirectory(new Path(targetTestRoot,"user/dirX")));
fsView.mkdirs(
fileSystemTestHelper.getTestRootPath(fsView, "/user/dirX/dirY"));
Assert.assertTrue("New dir should be type dir",
fsView.isDirectory(new Path("/user/dirX/dirY")));
Assert.assertTrue("Target of new dir should be of type dir",
fsTarget.isDirectory(new Path(targetTestRoot,"user/dirX/dirY")));
// Delete the created dir
Assert.assertTrue("Delete should succeed",
fsView.delete(new Path("/user/dirX/dirY"), false));
Assert.assertFalse("File should not exist after delete",
fsView.exists(new Path("/user/dirX/dirY")));
Assert.assertFalse("Target File should not exist after delete",
fsTarget.exists(new Path(targetTestRoot,"user/dirX/dirY")));
Assert.assertTrue("Delete should succeed",
fsView.delete(new Path("/user/dirX"), false));
Assert.assertFalse("File should not exist after delete",
fsView.exists(new Path("/user/dirX")));
Assert.assertFalse(fsTarget.exists(new Path(targetTestRoot,"user/dirX")));
// Rename a file
fileSystemTestHelper.createFile(fsView, "/user/foo");
fsView.rename(new Path("/user/foo"), new Path("/user/fooBar"));
Assert.assertFalse("Renamed src should not exist",
fsView.exists(new Path("/user/foo")));
Assert.assertFalse("Renamed src should not exist in target",
fsTarget.exists(new Path(targetTestRoot,"user/foo")));
Assert.assertTrue("Renamed dest should exist as file",
fsView.isFile(fileSystemTestHelper.getTestRootPath(fsView,"/user/fooBar")));
Assert.assertTrue("Renamed dest should exist as file in target",
fsTarget.isFile(new Path(targetTestRoot,"user/fooBar")));
fsView.mkdirs(new Path("/user/dirFoo"));
fsView.rename(new Path("/user/dirFoo"), new Path("/user/dirFooBar"));
Assert.assertFalse("Renamed src should not exist",
fsView.exists(new Path("/user/dirFoo")));
Assert.assertFalse("Renamed src should not exist in target",
fsTarget.exists(new Path(targetTestRoot,"user/dirFoo")));
Assert.assertTrue("Renamed dest should exist as dir",
fsView.isDirectory(fileSystemTestHelper.getTestRootPath(fsView,"/user/dirFooBar")));
Assert.assertTrue("Renamed dest should exist as dir in target",
fsTarget.isDirectory(new Path(targetTestRoot,"user/dirFooBar")));
// Make a directory under a directory that's mounted from the root of another FS
fsView.mkdirs(new Path("/targetRoot/dirFoo"));
Assert.assertTrue(fsView.exists(new Path("/targetRoot/dirFoo")));
boolean dirFooPresent = false;
for (FileStatus fileStatus :
listStatusInternal(located, new Path("/targetRoot/"))) {
if (fileStatus.getPath().getName().equals("dirFoo")) {
dirFooPresent = true;
}
}
Assert.assertTrue(dirFooPresent);
}
  // Rename across mount points that point to the same target should also fail.
@Test(expected=IOException.class)
public void testRenameAcrossMounts1() throws IOException {
fileSystemTestHelper.createFile(fsView, "/user/foo");
fsView.rename(new Path("/user/foo"), new Path("/user2/fooBarBar"));
    /* - code if we had wanted this to succeed
Assert.assertFalse(fSys.exists(new Path("/user/foo")));
Assert.assertFalse(fSysLocal.exists(new Path(targetTestRoot,"user/foo")));
Assert.assertTrue(fSys.isFile(FileSystemTestHelper.getTestRootPath(fSys,"/user2/fooBarBar")));
Assert.assertTrue(fSysLocal.isFile(new Path(targetTestRoot,"user/fooBarBar")));
*/
}
  // Rename across mount points fails if the mount link targets are different,
  // even if the targets are part of the same target FS.
@Test(expected=IOException.class)
public void testRenameAcrossMounts2() throws IOException {
fileSystemTestHelper.createFile(fsView, "/user/foo");
fsView.rename(new Path("/user/foo"), new Path("/data/fooBar"));
}
  protected static boolean SupportsBlocks = false; // local fs uses 1 block
// override for HDFS
@Test
public void testGetBlockLocations() throws IOException {
Path targetFilePath = new Path(targetTestRoot,"data/largeFile");
FileSystemTestHelper.createFile(fsTarget,
targetFilePath, 10, 1024);
Path viewFilePath = new Path("/data/largeFile");
Assert.assertTrue("Created File should be type File",
fsView.isFile(viewFilePath));
BlockLocation[] viewBL = fsView.getFileBlockLocations(fsView.getFileStatus(viewFilePath), 0, 10240+100);
Assert.assertEquals(SupportsBlocks ? 10 : 1, viewBL.length);
BlockLocation[] targetBL = fsTarget.getFileBlockLocations(fsTarget.getFileStatus(targetFilePath), 0, 10240+100);
compareBLs(viewBL, targetBL);
// Same test but now get it via the FileStatus Parameter
fsView.getFileBlockLocations(
fsView.getFileStatus(viewFilePath), 0, 10240+100);
targetBL = fsTarget.getFileBlockLocations(
fsTarget.getFileStatus(targetFilePath), 0, 10240+100);
compareBLs(viewBL, targetBL);
}
void compareBLs(BlockLocation[] viewBL, BlockLocation[] targetBL) {
Assert.assertEquals(targetBL.length, viewBL.length);
int i = 0;
for (BlockLocation vbl : viewBL) {
Assert.assertEquals(vbl.toString(), targetBL[i].toString());
Assert.assertEquals(targetBL[i].getOffset(), vbl.getOffset());
Assert.assertEquals(targetBL[i].getLength(), vbl.getLength());
i++;
}
}
@Test
public void testLocatedListOnInternalDirsOfMountTable() throws IOException {
testListOnInternalDirsOfMountTableInternal(true);
}
/**
* Test "readOps" (e.g. list, listStatus)
* on internal dirs of mount table
* These operations should succeed.
*/
// test list on internal dirs of mount table
@Test
public void testListOnInternalDirsOfMountTable() throws IOException {
testListOnInternalDirsOfMountTableInternal(false);
}
private void testListOnInternalDirsOfMountTableInternal(boolean located)
throws IOException {
// list on Slash
FileStatus[] dirPaths = listStatusInternal(located, new Path("/"));
FileStatus fs;
verifyRootChildren(dirPaths);
// list on internal dir
dirPaths = listStatusInternal(located, new Path("/internalDir"));
Assert.assertEquals(2, dirPaths.length);
fs = fileSystemTestHelper.containsPath(fsView, "/internalDir/internalDir2", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink", fs.isDirectory());
fs = fileSystemTestHelper.containsPath(fsView, "/internalDir/linkToDir2",
dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
}
private void verifyRootChildren(FileStatus[] dirPaths) throws IOException {
FileStatus fs;
Assert.assertEquals(getExpectedDirPaths(), dirPaths.length);
fs = fileSystemTestHelper.containsPath(fsView, "/user", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
fs = fileSystemTestHelper.containsPath(fsView, "/data", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
fs = fileSystemTestHelper.containsPath(fsView, "/internalDir", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink", fs.isDirectory());
fs = fileSystemTestHelper.containsPath(fsView, "/danglingLink", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
fs = fileSystemTestHelper.containsPath(fsView, "/linkToAFile", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
}
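  // Root-level children in the default mount table: /targetRoot, /user,
  // /user2, /data, /internalDir, /danglingLink and /linkToAFile.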
int getExpectedDirPaths() {
return 7;
}
@Test
public void testListOnMountTargetDirs() throws IOException {
testListOnMountTargetDirsInternal(false);
}
@Test
public void testLocatedListOnMountTargetDirs() throws IOException {
testListOnMountTargetDirsInternal(true);
}
private void testListOnMountTargetDirsInternal(boolean located)
throws IOException {
final Path dataPath = new Path("/data");
FileStatus[] dirPaths = listStatusInternal(located, dataPath);
FileStatus fs;
Assert.assertEquals(0, dirPaths.length);
// add a file
long len = fileSystemTestHelper.createFile(fsView, "/data/foo");
dirPaths = listStatusInternal(located, dataPath);
Assert.assertEquals(1, dirPaths.length);
fs = fileSystemTestHelper.containsPath(fsView, "/data/foo", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("Created file shoudl appear as a file", fs.isFile());
Assert.assertEquals(len, fs.getLen());
// add a dir
fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView, "/data/dirX"));
dirPaths = listStatusInternal(located, dataPath);
Assert.assertEquals(2, dirPaths.length);
fs = fileSystemTestHelper.containsPath(fsView, "/data/foo", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("Created file shoudl appear as a file", fs.isFile());
fs = fileSystemTestHelper.containsPath(fsView, "/data/dirX", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("Created dir should appear as a dir", fs.isDirectory());
}
private FileStatus[] listStatusInternal(boolean located, Path dataPath) throws IOException {
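    // When 'located' is true, exercise listLocatedStatus() and materialize
    // its RemoteIterator into an array; otherwise use plain listStatus().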
FileStatus[] dirPaths = new FileStatus[0];
if (located) {
RemoteIterator<LocatedFileStatus> statIter =
fsView.listLocatedStatus(dataPath);
ArrayList<LocatedFileStatus> tmp = new ArrayList<LocatedFileStatus>(10);
while (statIter.hasNext()) {
tmp.add(statIter.next());
}
dirPaths = tmp.toArray(dirPaths);
} else {
dirPaths = fsView.listStatus(dataPath);
}
return dirPaths;
}
@Test
public void testFileStatusOnMountLink() throws IOException {
Assert.assertTrue(fsView.getFileStatus(new Path("/")).isDirectory());
checkFileStatus(fsView, "/", fileType.isDir);
checkFileStatus(fsView, "/user", fileType.isDir); // link followed => dir
checkFileStatus(fsView, "/data", fileType.isDir);
checkFileStatus(fsView, "/internalDir", fileType.isDir);
checkFileStatus(fsView, "/internalDir/linkToDir2", fileType.isDir);
checkFileStatus(fsView, "/internalDir/internalDir2/linkToDir3",
fileType.isDir);
checkFileStatus(fsView, "/linkToAFile", fileType.isFile);
}
@Test(expected=FileNotFoundException.class)
public void testgetFSonDanglingLink() throws IOException {
fsView.getFileStatus(new Path("/danglingLink"));
}
@Test(expected=FileNotFoundException.class)
public void testgetFSonNonExistingInternalDir() throws IOException {
fsView.getFileStatus(new Path("/internalDir/nonExisting"));
}
/*
* Test resolvePath(p)
*/
@Test
public void testResolvePathInternalPaths() throws IOException {
Assert.assertEquals(new Path("/"), fsView.resolvePath(new Path("/")));
Assert.assertEquals(new Path("/internalDir"),
fsView.resolvePath(new Path("/internalDir")));
}
@Test
public void testResolvePathMountPoints() throws IOException {
Assert.assertEquals(new Path(targetTestRoot,"user"),
fsView.resolvePath(new Path("/user")));
Assert.assertEquals(new Path(targetTestRoot,"data"),
fsView.resolvePath(new Path("/data")));
Assert.assertEquals(new Path(targetTestRoot,"dir2"),
fsView.resolvePath(new Path("/internalDir/linkToDir2")));
Assert.assertEquals(new Path(targetTestRoot,"dir3"),
fsView.resolvePath(new Path("/internalDir/internalDir2/linkToDir3")));
}
@Test
public void testResolvePathThroughMountPoints() throws IOException {
fileSystemTestHelper.createFile(fsView, "/user/foo");
Assert.assertEquals(new Path(targetTestRoot,"user/foo"),
fsView.resolvePath(new Path("/user/foo")));
fsView.mkdirs(
fileSystemTestHelper.getTestRootPath(fsView, "/user/dirX"));
Assert.assertEquals(new Path(targetTestRoot,"user/dirX"),
fsView.resolvePath(new Path("/user/dirX")));
fsView.mkdirs(
fileSystemTestHelper.getTestRootPath(fsView, "/user/dirX/dirY"));
Assert.assertEquals(new Path(targetTestRoot,"user/dirX/dirY"),
fsView.resolvePath(new Path("/user/dirX/dirY")));
}
@Test(expected=FileNotFoundException.class)
public void testResolvePathDanglingLink() throws IOException {
fsView.resolvePath(new Path("/danglingLink"));
}
@Test(expected=FileNotFoundException.class)
public void testResolvePathMissingThroughMountPoints() throws IOException {
fsView.resolvePath(new Path("/user/nonExisting"));
}
@Test(expected=FileNotFoundException.class)
public void testResolvePathMissingThroughMountPoints2() throws IOException {
fsView.mkdirs(
fileSystemTestHelper.getTestRootPath(fsView, "/user/dirX"));
fsView.resolvePath(new Path("/user/dirX/nonExisting"));
}
/**
* Test modify operations (create, mkdir, rename, etc)
* on internal dirs of mount table
* These operations should fail since the mount table is read-only or
* because the internal dir that it is trying to create already
   * exists.
*/
  // Mkdir on an existing internal mount table dir succeeds, except for /
@Test(expected=AccessControlException.class)
public void testInternalMkdirSlash() throws IOException {
fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView, "/"));
}
  @Test
  public void testInternalMkdirExisting1() throws IOException {
Assert.assertTrue("mkdir of existing dir should succeed",
fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView,
"/internalDir")));
}
  @Test
  public void testInternalMkdirExisting2() throws IOException {
Assert.assertTrue("mkdir of existing dir should succeed",
fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView,
"/internalDir/linkToDir2")));
}
// Mkdir for new internal mount table should fail
@Test(expected=AccessControlException.class)
public void testInternalMkdirNew() throws IOException {
fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView, "/dirNew"));
}
@Test(expected=AccessControlException.class)
public void testInternalMkdirNew2() throws IOException {
fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView, "/internalDir/dirNew"));
}
// Create File on internal mount table should fail
@Test(expected=AccessControlException.class)
public void testInternalCreate1() throws IOException {
fileSystemTestHelper.createFile(fsView, "/foo"); // 1 component
}
@Test(expected=AccessControlException.class)
public void testInternalCreate2() throws IOException { // 2 component
fileSystemTestHelper.createFile(fsView, "/internalDir/foo");
}
@Test(expected=AccessControlException.class)
public void testInternalCreateMissingDir() throws IOException {
fileSystemTestHelper.createFile(fsView, "/missingDir/foo");
}
@Test(expected=AccessControlException.class)
public void testInternalCreateMissingDir2() throws IOException {
fileSystemTestHelper.createFile(fsView, "/missingDir/miss2/foo");
}
@Test(expected=AccessControlException.class)
public void testInternalCreateMissingDir3() throws IOException {
fileSystemTestHelper.createFile(fsView, "/internalDir/miss2/foo");
}
// Delete on internal mount table should fail
@Test(expected=FileNotFoundException.class)
public void testInternalDeleteNonExisting() throws IOException {
fsView.delete(new Path("/NonExisting"), false);
}
@Test(expected=FileNotFoundException.class)
public void testInternalDeleteNonExisting2() throws IOException {
fsView.delete(new Path("/internalDir/NonExisting"), false);
}
@Test(expected=AccessControlException.class)
public void testInternalDeleteExisting() throws IOException {
fsView.delete(new Path("/internalDir"), false);
}
@Test(expected=AccessControlException.class)
public void testInternalDeleteExisting2() throws IOException {
fsView.getFileStatus(
new Path("/internalDir/linkToDir2")).isDirectory();
fsView.delete(new Path("/internalDir/linkToDir2"), false);
}
@Test
public void testMkdirOfMountLink() throws IOException {
// data exists - mkdirs returns true even though no permission in internal
// mount table
Assert.assertTrue("mkdir of existing mount link should succeed",
fsView.mkdirs(new Path("/data")));
}
// Rename on internal mount table should fail
@Test(expected=AccessControlException.class)
public void testInternalRename1() throws IOException {
fsView.rename(new Path("/internalDir"), new Path("/newDir"));
}
@Test(expected=AccessControlException.class)
public void testInternalRename2() throws IOException {
fsView.getFileStatus(new Path("/internalDir/linkToDir2")).isDirectory();
fsView.rename(new Path("/internalDir/linkToDir2"),
new Path("/internalDir/dir1"));
}
@Test(expected=AccessControlException.class)
public void testInternalRename3() throws IOException {
fsView.rename(new Path("/user"), new Path("/internalDir/linkToDir2"));
}
@Test(expected=AccessControlException.class)
public void testInternalRenameToSlash() throws IOException {
fsView.rename(new Path("/internalDir/linkToDir2/foo"), new Path("/"));
}
@Test(expected=AccessControlException.class)
public void testInternalRenameFromSlash() throws IOException {
fsView.rename(new Path("/"), new Path("/bar"));
}
@Test(expected=AccessControlException.class)
public void testInternalSetOwner() throws IOException {
fsView.setOwner(new Path("/internalDir"), "foo", "bar");
}
@Test
public void testCreateNonRecursive() throws IOException {
Path path = fileSystemTestHelper.getTestRootPath(fsView, "/user/foo");
fsView.createNonRecursive(path, false, 1024, (short)1, 1024L, null);
FileStatus status = fsView.getFileStatus(new Path("/user/foo"));
Assert.assertTrue("Created file should be type file",
fsView.isFile(new Path("/user/foo")));
Assert.assertTrue("Target of created file should be type file",
fsTarget.isFile(new Path(targetTestRoot, "user/foo")));
}
@Test
public void testRootReadableExecutable() throws IOException {
testRootReadableExecutableInternal(false);
}
@Test
public void testLocatedRootReadableExecutable() throws IOException {
testRootReadableExecutableInternal(true);
}
private void testRootReadableExecutableInternal(boolean located)
throws IOException {
// verify executable permission on root: cd /
//
Assert.assertFalse("In root before cd",
fsView.getWorkingDirectory().isRoot());
fsView.setWorkingDirectory(new Path("/"));
Assert.assertTrue("Not in root dir after cd",
fsView.getWorkingDirectory().isRoot());
// verify readable
//
verifyRootChildren(listStatusInternal(located,
fsView.getWorkingDirectory()));
// verify permissions
//
final FileStatus rootStatus =
fsView.getFileStatus(fsView.getWorkingDirectory());
final FsPermission perms = rootStatus.getPermission();
Assert.assertTrue("User-executable permission not set!",
perms.getUserAction().implies(FsAction.EXECUTE));
Assert.assertTrue("User-readable permission not set!",
perms.getUserAction().implies(FsAction.READ));
Assert.assertTrue("Group-executable permission not set!",
perms.getGroupAction().implies(FsAction.EXECUTE));
Assert.assertTrue("Group-readable permission not set!",
perms.getGroupAction().implies(FsAction.READ));
Assert.assertTrue("Other-executable permission not set!",
perms.getOtherAction().implies(FsAction.EXECUTE));
Assert.assertTrue("Other-readable permission not set!",
perms.getOtherAction().implies(FsAction.READ));
}
/**
* Verify the behavior of ACL operations on paths above the root of
* any mount table entry.
*/
@Test(expected=AccessControlException.class)
public void testInternalModifyAclEntries() throws IOException {
fsView.modifyAclEntries(new Path("/internalDir"),
new ArrayList<AclEntry>());
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveAclEntries() throws IOException {
fsView.removeAclEntries(new Path("/internalDir"),
new ArrayList<AclEntry>());
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveDefaultAcl() throws IOException {
fsView.removeDefaultAcl(new Path("/internalDir"));
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveAcl() throws IOException {
fsView.removeAcl(new Path("/internalDir"));
}
@Test(expected=AccessControlException.class)
public void testInternalSetAcl() throws IOException {
fsView.setAcl(new Path("/internalDir"), new ArrayList<AclEntry>());
}
@Test
public void testInternalGetAclStatus() throws IOException {
final UserGroupInformation currentUser =
UserGroupInformation.getCurrentUser();
AclStatus aclStatus = fsView.getAclStatus(new Path("/internalDir"));
assertEquals(aclStatus.getOwner(), currentUser.getUserName());
assertEquals(aclStatus.getGroup(), currentUser.getGroupNames()[0]);
assertEquals(aclStatus.getEntries(),
AclUtil.getMinimalAcl(PERMISSION_555));
assertFalse(aclStatus.isStickyBit());
}
@Test(expected=AccessControlException.class)
public void testInternalSetXAttr() throws IOException {
fsView.setXAttr(new Path("/internalDir"), "xattrName", null);
}
@Test(expected=NotInMountpointException.class)
public void testInternalGetXAttr() throws IOException {
fsView.getXAttr(new Path("/internalDir"), "xattrName");
}
@Test(expected=NotInMountpointException.class)
public void testInternalGetXAttrs() throws IOException {
fsView.getXAttrs(new Path("/internalDir"));
}
@Test(expected=NotInMountpointException.class)
public void testInternalGetXAttrsWithNames() throws IOException {
fsView.getXAttrs(new Path("/internalDir"), new ArrayList<String>());
}
@Test(expected=NotInMountpointException.class)
public void testInternalListXAttr() throws IOException {
fsView.listXAttrs(new Path("/internalDir"));
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveXAttr() throws IOException {
fsView.removeXAttr(new Path("/internalDir"), "xattrName");
}
@Test(expected = AccessControlException.class)
public void testInternalCreateSnapshot1() throws IOException {
fsView.createSnapshot(new Path("/internalDir"));
}
@Test(expected = AccessControlException.class)
public void testInternalCreateSnapshot2() throws IOException {
fsView.createSnapshot(new Path("/internalDir"), "snap1");
}
@Test(expected = AccessControlException.class)
public void testInternalRenameSnapshot() throws IOException {
fsView.renameSnapshot(new Path("/internalDir"), "snapOldName",
"snapNewName");
}
@Test(expected = AccessControlException.class)
public void testInternalDeleteSnapshot() throws IOException {
fsView.deleteSnapshot(new Path("/internalDir"), "snap1");
}
}
| 34,925 | 38.286839 | 116 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegationTokenSupport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import static org.junit.Assert.*;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Test ViewFileSystem's support for having delegation tokens fetched and cached
* for the file system.
*
* Currently this class just ensures that getCanonicalServiceName() always
* returns <code>null</code> for ViewFileSystem instances.
*/
public class TestViewFileSystemDelegationTokenSupport {
private static final String MOUNT_TABLE_NAME = "vfs-cluster";
static Configuration conf;
static FileSystem viewFs;
static FakeFileSystem fs1;
static FakeFileSystem fs2;
@BeforeClass
public static void setup() throws Exception {
conf = ViewFileSystemTestSetup.createConfig();
fs1 = setupFileSystem(new URI("fs1:///"), FakeFileSystem.class);
fs2 = setupFileSystem(new URI("fs2:///"), FakeFileSystem.class);
viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
}
static FakeFileSystem setupFileSystem(URI uri, Class<? extends FileSystem> clazz)
throws Exception {
String scheme = uri.getScheme();
conf.set("fs."+scheme+".impl", clazz.getName());
FakeFileSystem fs = (FakeFileSystem)FileSystem.get(uri, conf);
// mount each fs twice, will later ensure 1 token/fs
ConfigUtil.addLink(conf, "/mounts/"+scheme+"-one", fs.getUri());
ConfigUtil.addLink(conf, "/mounts/"+scheme+"-two", fs.getUri());
return fs;
}
/**
* Regression test for HADOOP-8408.
*/
@Test
public void testGetCanonicalServiceNameWithNonDefaultMountTable()
throws URISyntaxException, IOException {
Configuration conf = new Configuration();
ConfigUtil.addLink(conf, MOUNT_TABLE_NAME, "/user", new URI("file:///"));
FileSystem viewFs = FileSystem.get(new URI(FsConstants.VIEWFS_SCHEME +
"://" + MOUNT_TABLE_NAME), conf);
String serviceName = viewFs.getCanonicalServiceName();
assertNull(serviceName);
}
@Test
public void testGetCanonicalServiceNameWithDefaultMountTable()
throws URISyntaxException, IOException {
Configuration conf = new Configuration();
ConfigUtil.addLink(conf, "/user", new URI("file:///"));
FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
String serviceName = viewFs.getCanonicalServiceName();
assertNull(serviceName);
}
@Test
public void testGetChildFileSystems() throws Exception {
assertNull(fs1.getChildFileSystems());
assertNull(fs2.getChildFileSystems());
List<FileSystem> children = Arrays.asList(viewFs.getChildFileSystems());
assertEquals(2, children.size());
assertTrue(children.contains(fs1));
assertTrue(children.contains(fs2));
}
@Test
public void testAddDelegationTokens() throws Exception {
Credentials creds = new Credentials();
Token<?> fs1Tokens[] = addTokensWithCreds(fs1, creds);
assertEquals(1, fs1Tokens.length);
assertEquals(1, creds.numberOfTokens());
Token<?> fs2Tokens[] = addTokensWithCreds(fs2, creds);
assertEquals(1, fs2Tokens.length);
assertEquals(2, creds.numberOfTokens());
Credentials savedCreds = creds;
creds = new Credentials();
// should get the same set of tokens as explicitly fetched above
Token<?> viewFsTokens[] = viewFs.addDelegationTokens("me", creds);
assertEquals(2, viewFsTokens.length);
assertTrue(creds.getAllTokens().containsAll(savedCreds.getAllTokens()));
assertEquals(savedCreds.numberOfTokens(), creds.numberOfTokens());
// should get none, already have all tokens
viewFsTokens = viewFs.addDelegationTokens("me", creds);
assertEquals(0, viewFsTokens.length);
assertTrue(creds.getAllTokens().containsAll(savedCreds.getAllTokens()));
assertEquals(savedCreds.numberOfTokens(), creds.numberOfTokens());
}
Token<?>[] addTokensWithCreds(FileSystem fs, Credentials creds) throws Exception {
Credentials savedCreds;
savedCreds = new Credentials(creds);
Token<?> tokens[] = fs.addDelegationTokens("me", creds);
// test that we got the token we wanted, and that creds were modified
assertEquals(1, tokens.length);
assertEquals(fs.getCanonicalServiceName(), tokens[0].getService().toString());
assertTrue(creds.getAllTokens().contains(tokens[0]));
assertTrue(creds.getAllTokens().containsAll(savedCreds.getAllTokens()));
assertEquals(savedCreds.numberOfTokens()+1, creds.numberOfTokens());
// shouldn't get any new tokens since already in creds
savedCreds = new Credentials(creds);
Token<?> tokenRefetch[] = fs.addDelegationTokens("me", creds);
assertEquals(0, tokenRefetch.length);
assertTrue(creds.getAllTokens().containsAll(savedCreds.getAllTokens()));
assertEquals(savedCreds.numberOfTokens(), creds.numberOfTokens());
return tokens;
}
static class FakeFileSystem extends RawLocalFileSystem {
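    // Minimal test fs: each instance reports a unique canonical service name
    // (its URI plus hashCode), so every mounted instance yields its own token.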
URI uri;
@Override
public void initialize(URI name, Configuration conf) throws IOException {
this.uri = name;
}
@Override
public Path getInitialWorkingDirectory() {
return new Path("/"); // ctor calls getUri before the uri is inited...
}
@Override
public URI getUri() {
return uri;
}
@Override
public String getCanonicalServiceName() {
return String.valueOf(this.getUri()+"/"+this.hashCode());
}
@Override
public Token<?> getDelegationToken(String renewer) throws IOException {
Token<?> token = new Token<TokenIdentifier>();
token.setService(new Text(getCanonicalServiceName()));
return token;
}
@Override
public void close() {}
}
}
| 6,968 | 34.92268 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.junit.AfterClass;
import org.junit.Test;
import org.mockito.Mockito;
import static org.junit.Assert.*;
/**
* The FileStatus is being serialized in MR as jobs are submitted.
 * Since viewfs has overlaid ViewFsFileStatus, we ran into
 * serialization problems. This test verifies the fix.
*/
public class TestViewfsFileStatus {
private static final File TEST_DIR =
new File(System.getProperty("test.build.data", "/tmp"),
TestViewfsFileStatus.class.getSimpleName());
@Test
public void testFileStatusSerialziation()
throws IOException, URISyntaxException {
String testfilename = "testFileStatusSerialziation";
TEST_DIR.mkdirs();
File infile = new File(TEST_DIR, testfilename);
final byte[] content = "dingos".getBytes();
FileOutputStream fos = null;
try {
fos = new FileOutputStream(infile);
fos.write(content);
} finally {
if (fos != null) {
fos.close();
}
}
assertEquals((long)content.length, infile.length());
Configuration conf = new Configuration();
ConfigUtil.addLink(conf, "/foo/bar/baz", TEST_DIR.toURI());
FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
assertEquals(ViewFileSystem.class, vfs.getClass());
FileStatus stat = vfs.getFileStatus(new Path("/foo/bar/baz", testfilename));
assertEquals(content.length, stat.getLen());
// check serialization/deserialization
DataOutputBuffer dob = new DataOutputBuffer();
stat.write(dob);
DataInputBuffer dib = new DataInputBuffer();
dib.reset(dob.getData(), 0, dob.getLength());
FileStatus deSer = new FileStatus();
deSer.readFields(dib);
assertEquals(content.length, deSer.getLen());
}
// Tests that ViewFileSystem.getFileChecksum calls res.targetFileSystem
// .getFileChecksum with res.remainingPath and not with f
@Test
public void testGetFileChecksum() throws IOException {
FileSystem mockFS = Mockito.mock(FileSystem.class);
InodeTree.ResolveResult<FileSystem> res =
new InodeTree.ResolveResult<FileSystem>(null, mockFS , null,
new Path("someFile"));
@SuppressWarnings("unchecked")
InodeTree<FileSystem> fsState = Mockito.mock(InodeTree.class);
Mockito.when(fsState.resolve("/tmp/someFile", true)).thenReturn(res);
ViewFileSystem vfs = Mockito.mock(ViewFileSystem.class);
vfs.fsState = fsState;
Mockito.when(vfs.getFileChecksum(new Path("/tmp/someFile")))
.thenCallRealMethod();
vfs.getFileChecksum(new Path("/tmp/someFile"));
Mockito.verify(mockFS).getFileChecksum(new Path("someFile"));
}
@AfterClass
public static void cleanup() throws IOException {
FileUtil.fullyDelete(TEST_DIR);
}
}
| 4,007 | 34.469027 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcPermissionsLocalFs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextPermissionBase;
import org.junit.After;
import org.junit.Before;
public class TestFcPermissionsLocalFs extends FileContextPermissionBase {
@Override
@Before
public void setUp() throws Exception {
super.setUp();
}
@Override
@After
public void tearDown() throws Exception {
super.tearDown();
ViewFsTestSetup.tearDownForViewFsLocalFs(fileContextTestHelper);
}
@Override
protected FileContext getFileContext() throws Exception {
return ViewFsTestSetup.setupForViewFsLocalFs(fileContextTestHelper);
}
}
| 1,470 | 30.297872 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import java.io.IOException;
import java.net.URI;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.viewfs.TestChRootedFileSystem.MockFileSystem;
import org.junit.*;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
/**
* Verify that viewfs propagates certain methods to the underlying fs
*/
public class TestViewFileSystemDelegation { //extends ViewFileSystemTestSetup {
static Configuration conf;
static FileSystem viewFs;
static FakeFileSystem fs1;
static FakeFileSystem fs2;
@BeforeClass
public static void setup() throws Exception {
conf = ViewFileSystemTestSetup.createConfig();
fs1 = setupFileSystem(new URI("fs1:/"), FakeFileSystem.class);
fs2 = setupFileSystem(new URI("fs2:/"), FakeFileSystem.class);
viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
}
  static FakeFileSystem setupFileSystem(URI uri, Class<? extends FileSystem> clazz)
throws Exception {
String scheme = uri.getScheme();
conf.set("fs."+scheme+".impl", clazz.getName());
FakeFileSystem fs = (FakeFileSystem)FileSystem.get(uri, conf);
assertEquals(uri, fs.getUri());
Path targetPath = new FileSystemTestHelper().getAbsoluteTestRootPath(fs);
ConfigUtil.addLink(conf, "/mounts/"+scheme, targetPath.toUri());
return fs;
}
private static FileSystem setupMockFileSystem(Configuration conf, URI uri)
throws Exception {
String scheme = uri.getScheme();
conf.set("fs." + scheme + ".impl", MockFileSystem.class.getName());
FileSystem fs = FileSystem.get(uri, conf);
ConfigUtil.addLink(conf, "/mounts/" + scheme, uri);
return ((MockFileSystem)fs).getRawFileSystem();
}
@Test
public void testSanity() {
assertEquals("fs1:/", fs1.getUri().toString());
assertEquals("fs2:/", fs2.getUri().toString());
}
@Test
public void testVerifyChecksum() throws Exception {
checkVerifyChecksum(false);
checkVerifyChecksum(true);
}
/**
* Tests that ViewFileSystem dispatches calls for every ACL method through the
* mount table to the correct underlying FileSystem with all Path arguments
* translated as required.
*/
@Test
public void testAclMethods() throws Exception {
Configuration conf = ViewFileSystemTestSetup.createConfig();
FileSystem mockFs1 = setupMockFileSystem(conf, new URI("mockfs1:/"));
FileSystem mockFs2 = setupMockFileSystem(conf, new URI("mockfs2:/"));
FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
Path viewFsPath1 = new Path("/mounts/mockfs1/a/b/c");
Path mockFsPath1 = new Path("/a/b/c");
Path viewFsPath2 = new Path("/mounts/mockfs2/d/e/f");
Path mockFsPath2 = new Path("/d/e/f");
List<AclEntry> entries = Collections.emptyList();
viewFs.modifyAclEntries(viewFsPath1, entries);
verify(mockFs1).modifyAclEntries(mockFsPath1, entries);
viewFs.modifyAclEntries(viewFsPath2, entries);
verify(mockFs2).modifyAclEntries(mockFsPath2, entries);
viewFs.removeAclEntries(viewFsPath1, entries);
verify(mockFs1).removeAclEntries(mockFsPath1, entries);
viewFs.removeAclEntries(viewFsPath2, entries);
verify(mockFs2).removeAclEntries(mockFsPath2, entries);
viewFs.removeDefaultAcl(viewFsPath1);
verify(mockFs1).removeDefaultAcl(mockFsPath1);
viewFs.removeDefaultAcl(viewFsPath2);
verify(mockFs2).removeDefaultAcl(mockFsPath2);
viewFs.removeAcl(viewFsPath1);
verify(mockFs1).removeAcl(mockFsPath1);
viewFs.removeAcl(viewFsPath2);
verify(mockFs2).removeAcl(mockFsPath2);
viewFs.setAcl(viewFsPath1, entries);
verify(mockFs1).setAcl(mockFsPath1, entries);
viewFs.setAcl(viewFsPath2, entries);
verify(mockFs2).setAcl(mockFsPath2, entries);
viewFs.getAclStatus(viewFsPath1);
verify(mockFs1).getAclStatus(mockFsPath1);
viewFs.getAclStatus(viewFsPath2);
verify(mockFs2).getAclStatus(mockFsPath2);
}
void checkVerifyChecksum(boolean flag) {
viewFs.setVerifyChecksum(flag);
assertEquals(flag, fs1.getVerifyChecksum());
assertEquals(flag, fs2.getVerifyChecksum());
}
static class FakeFileSystem extends LocalFileSystem {
boolean verifyChecksum = true;
URI uri;
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
super.initialize(uri, conf);
this.uri = uri;
}
@Override
public URI getUri() {
return uri;
}
@Override
public void setVerifyChecksum(boolean verifyChecksum) {
this.verifyChecksum = verifyChecksum;
}
public boolean getVerifyChecksum(){
return verifyChecksum;
}
}
}
| 5,779 | 33.819277 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcMainOperationsLocalFs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextMainOperationsBaseTest;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Before;
public class TestFcMainOperationsLocalFs extends
FileContextMainOperationsBaseTest {
FileContext fclocal;
Path targetOfTests;
@Override
@Before
public void setUp() throws Exception {
fc = ViewFsTestSetup.setupForViewFsLocalFs(fileContextTestHelper);
super.setUp();
}
@Override
@After
public void tearDown() throws Exception {
super.tearDown();
ViewFsTestSetup.tearDownForViewFsLocalFs(fileContextTestHelper);
}
@Override
protected boolean listCorruptedBlocksSupported() {
return false;
}
}
| 1,584 | 28.351852 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.viewfs.ChRootedFileSystem;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import static org.mockito.Mockito.*;
public class TestChRootedFileSystem {
  FileSystem fSys; // The ChRootedFs
  FileSystem fSysTarget; // The target file system that fSys is chrooted into
Path chrootedTo;
FileSystemTestHelper fileSystemTestHelper;
@Before
public void setUp() throws Exception {
// create the test root on local_fs
Configuration conf = new Configuration();
fSysTarget = FileSystem.getLocal(conf);
fileSystemTestHelper = new FileSystemTestHelper();
chrootedTo = fileSystemTestHelper.getAbsoluteTestRootPath(fSysTarget);
// In case previous test was killed before cleanup
fSysTarget.delete(chrootedTo, true);
fSysTarget.mkdirs(chrootedTo);
// ChRoot to the root of the testDirectory
fSys = new ChRootedFileSystem(chrootedTo.toUri(), conf);
}
@After
public void tearDown() throws Exception {
fSysTarget.delete(chrootedTo, true);
}
@Test
public void testURI() {
URI uri = fSys.getUri();
Assert.assertEquals(chrootedTo.toUri(), uri);
}
@Test
public void testBasicPaths() {
URI uri = fSys.getUri();
Assert.assertEquals(chrootedTo.toUri(), uri);
Assert.assertEquals(fSys.makeQualified(
new Path(System.getProperty("user.home"))),
fSys.getWorkingDirectory());
Assert.assertEquals(fSys.makeQualified(
new Path(System.getProperty("user.home"))),
fSys.getHomeDirectory());
/*
     * ChRootedFs has its uri like file:///chrootRoot.
     * This is questionable since Path.makeQualified(uri, path) ignores
* the pathPart of a uri. So our notion of chrooted URI is questionable.
* But if we were to fix Path#makeQualified() then the next test should
* have been:
Assert.assertEquals(
new Path(chrootedTo + "/foo/bar").makeQualified(
FsConstants.LOCAL_FS_URI, null),
fSys.makeQualified(new Path( "/foo/bar")));
*/
Assert.assertEquals(
new Path("/foo/bar").makeQualified(FsConstants.LOCAL_FS_URI, null),
fSys.makeQualified(new Path("/foo/bar")));
}
/**
* Test modify operations (create, mkdir, delete, etc)
*
* Verify the operation via chrootedfs (ie fSys) and *also* via the
* target file system (ie fSysTarget) that has been chrooted.
*/
@Test
public void testCreateDelete() throws IOException {
// Create file
fileSystemTestHelper.createFile(fSys, "/foo");
Assert.assertTrue(fSys.isFile(new Path("/foo")));
Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo, "foo")));
// Create file with recursive dir
fileSystemTestHelper.createFile(fSys, "/newDir/foo");
Assert.assertTrue(fSys.isFile(new Path("/newDir/foo")));
Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"newDir/foo")));
// Delete the created file
Assert.assertTrue(fSys.delete(new Path("/newDir/foo"), false));
Assert.assertFalse(fSys.exists(new Path("/newDir/foo")));
Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo, "newDir/foo")));
// Create file with a 2 component dirs recursively
fileSystemTestHelper.createFile(fSys, "/newDir/newDir2/foo");
Assert.assertTrue(fSys.isFile(new Path("/newDir/newDir2/foo")));
Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"newDir/newDir2/foo")));
// Delete the created file
Assert.assertTrue(fSys.delete(new Path("/newDir/newDir2/foo"), false));
Assert.assertFalse(fSys.exists(new Path("/newDir/newDir2/foo")));
Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/newDir2/foo")));
}
@Test
public void testMkdirDelete() throws IOException {
fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys, "/dirX"));
Assert.assertTrue(fSys.isDirectory(new Path("/dirX")));
Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"dirX")));
fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys, "/dirX/dirY"));
Assert.assertTrue(fSys.isDirectory(new Path("/dirX/dirY")));
Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"dirX/dirY")));
// Delete the created dir
Assert.assertTrue(fSys.delete(new Path("/dirX/dirY"), false));
Assert.assertFalse(fSys.exists(new Path("/dirX/dirY")));
Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"dirX/dirY")));
Assert.assertTrue(fSys.delete(new Path("/dirX"), false));
Assert.assertFalse(fSys.exists(new Path("/dirX")));
Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"dirX")));
}
@Test
public void testRename() throws IOException {
// Rename a file
fileSystemTestHelper.createFile(fSys, "/newDir/foo");
fSys.rename(new Path("/newDir/foo"), new Path("/newDir/fooBar"));
Assert.assertFalse(fSys.exists(new Path("/newDir/foo")));
Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/foo")));
Assert.assertTrue(fSys.isFile(fileSystemTestHelper.getTestRootPath(fSys,"/newDir/fooBar")));
Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"newDir/fooBar")));
// Rename a dir
fSys.mkdirs(new Path("/newDir/dirFoo"));
fSys.rename(new Path("/newDir/dirFoo"), new Path("/newDir/dirFooBar"));
Assert.assertFalse(fSys.exists(new Path("/newDir/dirFoo")));
Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/dirFoo")));
Assert.assertTrue(fSys.isDirectory(fileSystemTestHelper.getTestRootPath(fSys,"/newDir/dirFooBar")));
Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"newDir/dirFooBar")));
}
@Test
public void testGetContentSummary() throws IOException {
// GetContentSummary of a dir
fSys.mkdirs(new Path("/newDir/dirFoo"));
ContentSummary cs = fSys.getContentSummary(new Path("/newDir/dirFoo"));
Assert.assertEquals(-1L, cs.getQuota());
Assert.assertEquals(-1L, cs.getSpaceQuota());
}
/**
   * We would have liked renames across file systems to fail but
   * unfortunately there is no way to distinguish the two file systems
* @throws IOException
*/
@Test
public void testRenameAcrossFs() throws IOException {
fSys.mkdirs(new Path("/newDir/dirFoo"));
fSys.rename(new Path("/newDir/dirFoo"), new Path("file:///tmp/dirFooBar"));
FileSystemTestHelper.isDir(fSys, new Path("/tmp/dirFooBar"));
}
@Test
public void testList() throws IOException {
FileStatus fs = fSys.getFileStatus(new Path("/"));
Assert.assertTrue(fs.isDirectory());
// should return the full path not the chrooted path
Assert.assertEquals(fs.getPath(), chrootedTo);
// list on Slash
FileStatus[] dirPaths = fSys.listStatus(new Path("/"));
Assert.assertEquals(0, dirPaths.length);
fileSystemTestHelper.createFile(fSys, "/foo");
fileSystemTestHelper.createFile(fSys, "/bar");
fSys.mkdirs(new Path("/dirX"));
fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys, "/dirY"));
fSys.mkdirs(new Path("/dirX/dirXX"));
dirPaths = fSys.listStatus(new Path("/"));
    Assert.assertEquals(4, dirPaths.length); // foo, bar, dirX, dirY; the 2 crc files are filtered out
    // Note that the file status paths are the full paths on the target
fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "foo"), dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isFile());
fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "bar"), dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isFile());
fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "dirX"), dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isDirectory());
fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "dirY"), dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isDirectory());
}
@Test
public void testWorkingDirectory() throws Exception {
// First we cd to our test root
fSys.mkdirs(new Path("/testWd"));
Path workDir = new Path("/testWd");
fSys.setWorkingDirectory(workDir);
Assert.assertEquals(workDir, fSys.getWorkingDirectory());
fSys.setWorkingDirectory(new Path("."));
Assert.assertEquals(workDir, fSys.getWorkingDirectory());
fSys.setWorkingDirectory(new Path(".."));
Assert.assertEquals(workDir.getParent(), fSys.getWorkingDirectory());
// cd using a relative path
// Go back to our test root
workDir = new Path("/testWd");
fSys.setWorkingDirectory(workDir);
Assert.assertEquals(workDir, fSys.getWorkingDirectory());
Path relativeDir = new Path("existingDir1");
Path absoluteDir = new Path(workDir,"existingDir1");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(relativeDir);
Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
    // cd using an absolute path
absoluteDir = new Path("/test/existingDir2");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
// Now open a file relative to the wd we just set above.
Path absoluteFooPath = new Path(absoluteDir, "foo");
fSys.create(absoluteFooPath).close();
fSys.open(new Path("foo")).close();
// Now mkdir relative to the dir we cd'ed to
fSys.mkdirs(new Path("newDir"));
Assert.assertTrue(fSys.isDirectory(new Path(absoluteDir, "newDir")));
    /* Filesystem impls (RawLocal and DistributedFileSystem) do not check
     * for the existence of the working dir
absoluteDir = getTestRootPath(fSys, "nonexistingPath");
try {
fSys.setWorkingDirectory(absoluteDir);
Assert.fail("cd to non existing dir should have failed");
} catch (Exception e) {
// Exception as expected
}
*/
// Try a URI
final String LOCAL_FS_ROOT_URI = "file:///tmp/test";
absoluteDir = new Path(LOCAL_FS_ROOT_URI + "/existingDir");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
}
/*
* Test resolvePath(p)
*/
@Test
public void testResolvePath() throws IOException {
Assert.assertEquals(chrootedTo, fSys.resolvePath(new Path("/")));
fileSystemTestHelper.createFile(fSys, "/foo");
Assert.assertEquals(new Path(chrootedTo, "foo"),
fSys.resolvePath(new Path("/foo")));
}
@Test(expected=FileNotFoundException.class)
public void testResolvePathNonExisting() throws IOException {
fSys.resolvePath(new Path("/nonExisting"));
}
@Test
public void testDeleteOnExitPathHandling() throws IOException {
Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
URI chrootUri = URI.create("mockfs://foo/a/b");
ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf);
FileSystem mockFs = ((FilterFileSystem)chrootFs.getRawFileSystem())
.getRawFileSystem();
// ensure delete propagates the correct path
Path chrootPath = new Path("/c");
Path rawPath = new Path("/a/b/c");
chrootFs.delete(chrootPath, false);
verify(mockFs).delete(eq(rawPath), eq(false));
reset(mockFs);
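    // reset the mock so the non-recursive delete verified above does not
    // also satisfy the recursive-delete verification done after close()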
// fake that the path exists for deleteOnExit
FileStatus stat = mock(FileStatus.class);
when(mockFs.getFileStatus(eq(rawPath))).thenReturn(stat);
// ensure deleteOnExit propagates the correct path
chrootFs.deleteOnExit(chrootPath);
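    // close() processes the registered delete-on-exit paths, which should
    // surface as a recursive delete on the chroot-translated raw path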
chrootFs.close();
verify(mockFs).delete(eq(rawPath), eq(true));
}
@Test
public void testURIEmptyPath() throws IOException {
Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
URI chrootUri = URI.create("mockfs://foo");
new ChRootedFileSystem(chrootUri, conf);
}
/**
* Tests that ChRootedFileSystem delegates calls for every ACL method to the
* underlying FileSystem with all Path arguments translated as required to
* enforce chroot.
*/
@Test
public void testAclMethodsPathTranslation() throws IOException {
Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
URI chrootUri = URI.create("mockfs://foo/a/b");
ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf);
FileSystem mockFs = ((FilterFileSystem)chrootFs.getRawFileSystem())
.getRawFileSystem();
Path chrootPath = new Path("/c");
Path rawPath = new Path("/a/b/c");
List<AclEntry> entries = Collections.emptyList();
chrootFs.modifyAclEntries(chrootPath, entries);
verify(mockFs).modifyAclEntries(rawPath, entries);
chrootFs.removeAclEntries(chrootPath, entries);
verify(mockFs).removeAclEntries(rawPath, entries);
chrootFs.removeDefaultAcl(chrootPath);
verify(mockFs).removeDefaultAcl(rawPath);
chrootFs.removeAcl(chrootPath);
verify(mockFs).removeAcl(rawPath);
chrootFs.setAcl(chrootPath, entries);
verify(mockFs).setAcl(rawPath, entries);
chrootFs.getAclStatus(chrootPath);
verify(mockFs).getAclStatus(rawPath);
}
@Test
public void testListLocatedFileStatus() throws IOException {
final Path mockMount = new Path("mockfs://foo/user");
final Path mockPath = new Path("/usermock");
final Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
ConfigUtil.addLink(conf, mockPath.toString(), mockMount.toUri());
FileSystem vfs = FileSystem.get(URI.create("viewfs:///"), conf);
vfs.listLocatedStatus(mockPath);
final FileSystem mockFs = ((MockFileSystem)mockMount.getFileSystem(conf))
.getRawFileSystem();
verify(mockFs).listLocatedStatus(new Path(mockMount.toUri().getPath()));
}
static class MockFileSystem extends FilterFileSystem {
MockFileSystem() {
super(mock(FileSystem.class));
}
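    // initialize is a no-op so that the Mockito mock handed to super() is
    // kept as the raw fs; tests can then verify the calls delegated to it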
@Override
public void initialize(URI name, Configuration conf) throws IOException {}
}
@Test(timeout = 30000)
public void testCreateSnapshot() throws Exception {
Path snapRootPath = new Path("/snapPath");
Path chRootedSnapRootPath = new Path("/a/b/snapPath");
Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
URI chrootUri = URI.create("mockfs://foo/a/b");
ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf);
FileSystem mockFs = ((FilterFileSystem) chrootFs.getRawFileSystem())
.getRawFileSystem();
chrootFs.createSnapshot(snapRootPath, "snap1");
verify(mockFs).createSnapshot(chRootedSnapRootPath, "snap1");
}
@Test(timeout = 30000)
public void testDeleteSnapshot() throws Exception {
Path snapRootPath = new Path("/snapPath");
Path chRootedSnapRootPath = new Path("/a/b/snapPath");
Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
URI chrootUri = URI.create("mockfs://foo/a/b");
ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf);
FileSystem mockFs = ((FilterFileSystem) chrootFs.getRawFileSystem())
.getRawFileSystem();
chrootFs.deleteSnapshot(snapRootPath, "snap1");
verify(mockFs).deleteSnapshot(chRootedSnapRootPath, "snap1");
}
@Test(timeout = 30000)
public void testRenameSnapshot() throws Exception {
Path snapRootPath = new Path("/snapPath");
Path chRootedSnapRootPath = new Path("/a/b/snapPath");
Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
URI chrootUri = URI.create("mockfs://foo/a/b");
ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf);
FileSystem mockFs = ((FilterFileSystem) chrootFs.getRawFileSystem())
.getRawFileSystem();
chrootFs.renameSnapshot(snapRootPath, "snapOldName", "snapNewName");
verify(mockFs).renameSnapshot(chRootedSnapRootPath, "snapOldName",
"snapNewName");
}
}
| 17,514 | 36.108051 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.viewfs.ConfigUtil;
import org.apache.hadoop.fs.viewfs.InodeTree;
import org.junit.Test;
public class TestViewFsConfig {
@Test(expected=FileAlreadyExistsException.class)
public void testInvalidConfig() throws IOException, URISyntaxException {
Configuration conf = new Configuration();
ConfigUtil.addLink(conf, "/internalDir/linkToDir2",
new Path("file:///dir2").toUri());
ConfigUtil.addLink(conf, "/internalDir/linkToDir2/linkToDir3",
new Path("file:///dir3").toUri());
class Foo { };
new InodeTree<Foo>(conf, null) {
@Override
protected
Foo getTargetFileSystem(final URI uri)
throws URISyntaxException, UnsupportedFileSystemException {
return null;
}
@Override
protected
Foo getTargetFileSystem(
org.apache.hadoop.fs.viewfs.InodeTree.INodeDir<Foo>
dir)
throws URISyntaxException {
return null;
}
@Override
protected
Foo getTargetFileSystem(URI[] mergeFsURIList)
throws URISyntaxException, UnsupportedFileSystemException {
return null;
}
};
}
}
| 2,344 | 30.266667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLocalFs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import org.apache.hadoop.fs.FileContext;
import org.junit.After;
import org.junit.Before;
public class TestViewFsLocalFs extends ViewFsBaseTest {
@Override
@Before
public void setUp() throws Exception {
// create the test root on local_fs
fcTarget = FileContext.getLocalFSFileContext();
super.setUp();
}
@Override
@After
public void tearDown() throws Exception {
super.tearDown();
}
}
| 1,273 | 27.954545 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ConfigUtil;
import org.apache.hadoop.util.Shell;
import org.mortbay.log.Log;
/**
* This class is for setup and teardown for viewFileSystem so that
* it can be tested via the standard FileSystem tests.
*
 * If tests are launched via ant (build.xml), the test root is an absolute
 * path; if tests are launched via eclipse, the test root is a test dir
 * below the working directory (see FileContextTestHelper).
 *
 * We set up a viewFileSystem with 3 mount points:
 * 1) /<firstComponent> of testdir pointing to the same in the target fs
 * 2) /<firstComponent> of home pointing to the same in the target fs
 * 3) /<firstComponent> of wd pointing to the same in the target fs
 * (note in many cases the links may be the same - viewFileSystem handles this)
*
* We also set the view file system's wd to point to the wd.
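 *
 * For example (an illustrative sketch, not from a real run): with a test
 * root of /tmp/testRootDir and a home dir of /home/user, links are created
 * for the first components /tmp and /home, each pointing to the same path
 * in the target fs.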
*/
public class ViewFileSystemTestSetup {
static public String ViewFSTestDir = "/testDir";
/**
*
* @param fsTarget - the target fs of the view fs.
   * @return the viewFileSystem to be used for tests
* @throws Exception
*/
static public FileSystem setupForViewFileSystem(Configuration conf, FileSystemTestHelper fileSystemTestHelper, FileSystem fsTarget) throws Exception {
/**
* create the test root on local_fs - the mount table will point here
*/
Path targetOfTests = fileSystemTestHelper.getTestRootPath(fsTarget);
// In case previous test was killed before cleanup
fsTarget.delete(targetOfTests, true);
fsTarget.mkdirs(targetOfTests);
// Set up viewfs link for test dir as described above
String testDir = fileSystemTestHelper.getTestRootPath(fsTarget).toUri()
.getPath();
linkUpFirstComponents(conf, testDir, fsTarget, "test dir");
// Set up viewfs link for home dir as described above
setUpHomeDir(conf, fsTarget);
// the test path may be relative to working dir - we need to make that work:
// Set up viewfs link for wd as described above
String wdDir = fsTarget.getWorkingDirectory().toUri().getPath();
linkUpFirstComponents(conf, wdDir, fsTarget, "working dir");
FileSystem fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
fsView.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd.
Log.info("Working dir is: " + fsView.getWorkingDirectory());
return fsView;
}
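  /*
   * Usage sketch (this roughly mirrors what TestViewFsTrash in this module
   * does; nothing beyond these two calls is required):
   *
   *   Configuration conf = ViewFileSystemTestSetup.createConfig();
   *   FileSystem fsView = ViewFileSystemTestSetup.setupForViewFileSystem(
   *       conf, new FileSystemTestHelper(), FileSystem.getLocal(conf));
   */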
/**
*
* delete the test directory in the target fs
*/
static public void tearDown(FileSystemTestHelper fileSystemTestHelper, FileSystem fsTarget) throws Exception {
Path targetOfTests = fileSystemTestHelper.getTestRootPath(fsTarget);
fsTarget.delete(targetOfTests, true);
}
public static Configuration createConfig() {
return createConfig(true);
}
public static Configuration createConfig(boolean disableCache) {
Configuration conf = new Configuration();
conf.set("fs.viewfs.impl", ViewFileSystem.class.getName());
if (disableCache) {
conf.set("fs.viewfs.impl.disable.cache", "true");
}
return conf;
}
static void setUpHomeDir(Configuration conf, FileSystem fsTarget) {
String homeDir = fsTarget.getHomeDirectory().toUri().getPath();
int indexOf2ndSlash = homeDir.indexOf('/', 1);
if (indexOf2ndSlash >0) {
linkUpFirstComponents(conf, homeDir, fsTarget, "home dir");
    } else { // home dir is at root. Just link the home dir itself
URI linkTarget = fsTarget.makeQualified(new Path(homeDir)).toUri();
ConfigUtil.addLink(conf, homeDir, linkTarget);
Log.info("Added link for home dir " + homeDir + "->" + linkTarget);
}
// Now set the root of the home dir for viewfs
String homeDirRoot = fsTarget.getHomeDirectory().getParent().toUri().getPath();
ConfigUtil.setHomeDirConf(conf, homeDirRoot);
Log.info("Home dir base for viewfs" + homeDirRoot);
}
/*
* Set up link in config for first component of path to the same
* in the target file system.
*/
static void linkUpFirstComponents(Configuration conf, String path, FileSystem fsTarget, String info) {
int indexOfEnd = path.indexOf('/', 1);
if (Shell.WINDOWS) {
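      // On Windows the absolute path carries a drive spec as its first
      // component (e.g. /C:/...), so skip one more '/' to form a usable link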
indexOfEnd = path.indexOf('/', indexOfEnd + 1);
}
String firstComponent = path.substring(0, indexOfEnd);
URI linkTarget = fsTarget.makeQualified(new Path(firstComponent)).toUri();
ConfigUtil.addLink(conf, firstComponent, linkTarget);
Log.info("Added link for " + info + " "
+ firstComponent + "->" + linkTarget);
}
}
| 5,633 | 37.855172 | 152 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAuthorityLocalFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
*
 * Test ViewFileSystemBaseTest using a viewfs with authority:
 * viewfs://mountTableName/
 * i.e. the authority is used to load a mount table.
 * The authority name used is "default".
*
*/
public class TestViewFileSystemWithAuthorityLocalFileSystem extends ViewFileSystemBaseTest {
URI schemeWithAuthority;
@Override
@Before
public void setUp() throws Exception {
// create the test root on local_fs
fsTarget = FileSystem.getLocal(new Configuration());
super.setUp(); // this sets up conf (and fcView which we replace)
// Now create a viewfs using a mount table called "default"
// hence viewfs://default/
schemeWithAuthority =
new URI(FsConstants.VIEWFS_SCHEME, "default", "/", null, null);
fsView = FileSystem.get(schemeWithAuthority, conf);
}
@Override
@After
public void tearDown() throws Exception {
fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
super.tearDown();
}
@Override
@Test
public void testBasicPaths() {
Assert.assertEquals(schemeWithAuthority,
fsView.getUri());
Assert.assertEquals(fsView.makeQualified(
new Path("/user/" + System.getProperty("user.name"))),
fsView.getWorkingDirectory());
Assert.assertEquals(fsView.makeQualified(
new Path("/user/" + System.getProperty("user.name"))),
fsView.getHomeDirectory());
Assert.assertEquals(
new Path("/foo/bar").makeQualified(schemeWithAuthority, null),
fsView.makeQualified(new Path("/foo/bar")));
}
}
| 2,724 | 32.231707 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ConfigUtil;
import org.apache.hadoop.util.Shell;
import org.mortbay.log.Log;
/**
* This class is for setup and teardown for viewFs so that
* it can be tested via the standard FileContext tests.
*
 * If tests are launched via ant (build.xml), the test root is an absolute
 * path; if tests are launched via eclipse, the test root is a test dir
 * below the working directory (see FileContextTestHelper).
 *
 * We set up a viewfs with 3 mount points:
 * 1) /<firstComponent> of testdir pointing to the same in the target fs
 * 2) /<firstComponent> of home pointing to the same in the target fs
 * 3) /<firstComponent> of wd pointing to the same in the target fs
 * (note in many cases the links may be the same - viewfs handles this)
*
* We also set the view file system's wd to point to the wd.
*/
public class ViewFsTestSetup {
static public String ViewFSTestDir = "/testDir";
/*
* return the ViewFS File context to be used for tests
*/
static public FileContext setupForViewFsLocalFs(FileContextTestHelper helper) throws Exception {
/**
* create the test root on local_fs - the mount table will point here
*/
FileContext fsTarget = FileContext.getLocalFSFileContext();
Path targetOfTests = helper.getTestRootPath(fsTarget);
// In case previous test was killed before cleanup
fsTarget.delete(targetOfTests, true);
fsTarget.mkdir(targetOfTests, FileContext.DEFAULT_PERM, true);
Configuration conf = new Configuration();
// Set up viewfs link for test dir as described above
String testDir = helper.getTestRootPath(fsTarget).toUri()
.getPath();
linkUpFirstComponents(conf, testDir, fsTarget, "test dir");
// Set up viewfs link for home dir as described above
setUpHomeDir(conf, fsTarget);
// the test path may be relative to working dir - we need to make that work:
// Set up viewfs link for wd as described above
String wdDir = fsTarget.getWorkingDirectory().toUri().getPath();
linkUpFirstComponents(conf, wdDir, fsTarget, "working dir");
FileContext fc = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
fc.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd.
Log.info("Working dir is: " + fc.getWorkingDirectory());
//System.out.println("SRCOfTests = "+ getTestRootPath(fc, "test"));
//System.out.println("TargetOfTests = "+ targetOfTests.toUri());
return fc;
}
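  /*
   * Usage sketch (as TestFcCreateMkdirLocalFs in this module does):
   *
   *   fc = ViewFsTestSetup.setupForViewFsLocalFs(fileContextTestHelper);
   *   ... run FileContext tests against fc ...
   *   ViewFsTestSetup.tearDownForViewFsLocalFs(fileContextTestHelper);
   */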
/**
*
* delete the test directory in the target local fs
*/
static public void tearDownForViewFsLocalFs(FileContextTestHelper helper) throws Exception {
FileContext fclocal = FileContext.getLocalFSFileContext();
Path targetOfTests = helper.getTestRootPath(fclocal);
fclocal.delete(targetOfTests, true);
}
static void setUpHomeDir(Configuration conf, FileContext fsTarget) {
String homeDir = fsTarget.getHomeDirectory().toUri().getPath();
int indexOf2ndSlash = homeDir.indexOf('/', 1);
if (indexOf2ndSlash >0) {
linkUpFirstComponents(conf, homeDir, fsTarget, "home dir");
    } else { // home dir is at root. Just link the home dir itself
URI linkTarget = fsTarget.makeQualified(new Path(homeDir)).toUri();
ConfigUtil.addLink(conf, homeDir, linkTarget);
Log.info("Added link for home dir " + homeDir + "->" + linkTarget);
}
// Now set the root of the home dir for viewfs
String homeDirRoot = fsTarget.getHomeDirectory().getParent().toUri().getPath();
ConfigUtil.setHomeDirConf(conf, homeDirRoot);
Log.info("Home dir base for viewfs" + homeDirRoot);
}
/*
* Set up link in config for first component of path to the same
* in the target file system.
*/
static void linkUpFirstComponents(Configuration conf, String path,
FileContext fsTarget, String info) {
int indexOfEnd = path.indexOf('/', 1);
if (Shell.WINDOWS) {
indexOfEnd = path.indexOf('/', indexOfEnd + 1);
}
String firstComponent = path.substring(0, indexOfEnd);
URI linkTarget = fsTarget.makeQualified(new Path(firstComponent)).toUri();
ConfigUtil.addLink(conf, firstComponent, linkTarget);
Log.info("Added link for " + info + " "
+ firstComponent + "->" + linkTarget);
}
}
| 5,355 | 38.382353 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSMainOperationsBaseTest;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestFSMainOperationsLocalFileSystem extends FSMainOperationsBaseTest {
FileSystem fcTarget;
@Override
protected FileSystem createFileSystem() throws Exception {
return ViewFileSystemTestSetup.setupForViewFileSystem(
ViewFileSystemTestSetup.createConfig(), this, fcTarget);
}
@Override
@Before
public void setUp() throws Exception {
Configuration conf = new Configuration();
fcTarget = FileSystem.getLocal(conf);
super.setUp();
}
@Override
@After
public void tearDown() throws Exception {
super.tearDown();
ViewFileSystemTestSetup.tearDown(this, fcTarget);
}
@Test
@Override
public void testWDAbsolute() throws IOException {
Path absoluteDir = getTestRootPath(fSys, "test/existingDir");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
}
}
| 2,080 | 30.059701 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.TestTrash;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestViewFsTrash {
FileSystem fsTarget; // the target file system - the mount will point here
FileSystem fsView;
Configuration conf;
FileSystemTestHelper fileSystemTestHelper = new FileSystemTestHelper();
class TestLFS extends LocalFileSystem {
Path home;
TestLFS() throws IOException {
this(new Path(fileSystemTestHelper.getTestRootDir()));
}
TestLFS(Path home) throws IOException {
super();
this.home = home;
}
@Override
public Path getHomeDirectory() {
return home;
}
}
@Before
public void setUp() throws Exception {
fsTarget = FileSystem.getLocal(new Configuration());
fsTarget.mkdirs(new Path(fileSystemTestHelper.
getTestRootPath(fsTarget), "dir1"));
conf = ViewFileSystemTestSetup.createConfig();
fsView = ViewFileSystemTestSetup.setupForViewFileSystem(conf, fileSystemTestHelper, fsTarget);
conf.set("fs.defaultFS", FsConstants.VIEWFS_URI.toString());
}
@After
public void tearDown() throws Exception {
ViewFileSystemTestSetup.tearDown(fileSystemTestHelper, fsTarget);
fsTarget.delete(new Path(fsTarget.getHomeDirectory(), ".Trash/Current"),
true);
}
@Test
public void testTrash() throws IOException {
TestTrash.trashShell(conf, fileSystemTestHelper.getTestRootPath(fsView),
fsTarget, new Path(fsTarget.getHomeDirectory(), ".Trash/Current"));
}
}
| 2,662 | 32.708861 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import static org.apache.hadoop.fs.FileContextTestHelper.checkFileLinkStatus;
import static org.apache.hadoop.fs.FileContextTestHelper.checkFileStatus;
import static org.apache.hadoop.fs.FileContextTestHelper.exists;
import static org.apache.hadoop.fs.FileContextTestHelper.isDir;
import static org.apache.hadoop.fs.FileContextTestHelper.isFile;
import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestHelper;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.FileContextTestHelper.fileType;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.viewfs.ViewFs.MountPoint;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
/**
* <p>
* A collection of tests for the {@link ViewFs}.
 * This test should be used for testing ViewFs that has mount links to
 * a target file system such as localFs or Hdfs etc.
 * </p>
 * <p>
 * To test a given target file system, create a subclass of this
 * test and override {@link #setUp()} to initialize the <code>fcTarget</code>
 * to point to the file system to which you want the mount targets to point.
 *
 * Since this is a JUnit 4 test, you can also do a single setup before
 * the start of any tests.
* E.g.
* @BeforeClass public static void clusterSetupAtBegining()
* @AfterClass public static void ClusterShutdownAtEnd()
* </p>
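 * <p>
 * A minimal subclass sketch (this is what TestViewFsLocalFs in this module
 * does for a local-fs target): override {@link #setUp()} to assign
 * fcTarget = FileContext.getLocalFSFileContext() and then call super.setUp().
 * </p>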
*/
public class ViewFsBaseTest {
FileContext fcView; // the view file system - the mounts are here
FileContext fcTarget; // the target file system - the mount will point here
Path targetTestRoot;
Configuration conf;
  FileContext xfcViewWithAuthority; // same as fcView but with authority
URI schemeWithAuthority;
final FileContextTestHelper fileContextTestHelper = createFileContextHelper();
protected FileContextTestHelper createFileContextHelper() {
return new FileContextTestHelper();
}
@Before
public void setUp() throws Exception {
initializeTargetTestRoot();
    // Make user and data dirs - we create links to them in the mount table
fcTarget.mkdir(new Path(targetTestRoot,"user"),
FileContext.DEFAULT_PERM, true);
fcTarget.mkdir(new Path(targetTestRoot,"data"),
FileContext.DEFAULT_PERM, true);
fcTarget.mkdir(new Path(targetTestRoot,"dir2"),
FileContext.DEFAULT_PERM, true);
fcTarget.mkdir(new Path(targetTestRoot,"dir3"),
FileContext.DEFAULT_PERM, true);
FileContextTestHelper.createFile(fcTarget, new Path(targetTestRoot,"aFile"));
    // Now use the mount table config to set up links to the user and data
    // dirs in the test root
    // Set up the default mount table in the config with our mount point links
conf = new Configuration();
ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
ConfigUtil.addLink(conf, "/user",
new Path(targetTestRoot,"user").toUri());
ConfigUtil.addLink(conf, "/user2",
new Path(targetTestRoot,"user").toUri());
ConfigUtil.addLink(conf, "/data",
new Path(targetTestRoot,"data").toUri());
ConfigUtil.addLink(conf, "/internalDir/linkToDir2",
new Path(targetTestRoot,"dir2").toUri());
ConfigUtil.addLink(conf, "/internalDir/internalDir2/linkToDir3",
new Path(targetTestRoot,"dir3").toUri());
ConfigUtil.addLink(conf, "/danglingLink",
new Path(targetTestRoot,"missingTarget").toUri());
ConfigUtil.addLink(conf, "/linkToAFile",
new Path(targetTestRoot,"aFile").toUri());
fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
// Also try viewfs://default/ - note authority is name of mount table
}
void initializeTargetTestRoot() throws IOException {
targetTestRoot = fileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
// In case previous test was killed before cleanup
fcTarget.delete(targetTestRoot, true);
fcTarget.mkdir(targetTestRoot, FileContext.DEFAULT_PERM, true);
}
@After
public void tearDown() throws Exception {
fcTarget.delete(fileContextTestHelper.getTestRootPath(fcTarget), true);
}
@Test
public void testGetMountPoints() {
ViewFs viewfs = (ViewFs) fcView.getDefaultFileSystem();
MountPoint[] mountPoints = viewfs.getMountPoints();
Assert.assertEquals(8, mountPoints.length);
}
int getExpectedDelegationTokenCount() {
return 0;
}
/**
   * This default implementation is for when viewfs has mount points
   * into file systems, such as LocalFs, that do not have delegation tokens.
   * It should be overridden when the mount points are into hdfs.
*/
@Test
public void testGetDelegationTokens() throws IOException {
List<Token<?>> delTokens =
fcView.getDelegationTokens(new Path("/"), "sanjay");
Assert.assertEquals(getExpectedDelegationTokenCount(), delTokens.size());
}
@Test
public void testBasicPaths() {
Assert.assertEquals(FsConstants.VIEWFS_URI,
fcView.getDefaultFileSystem().getUri());
Assert.assertEquals(fcView.makeQualified(
new Path("/user/" + System.getProperty("user.name"))),
fcView.getWorkingDirectory());
Assert.assertEquals(fcView.makeQualified(
new Path("/user/" + System.getProperty("user.name"))),
fcView.getHomeDirectory());
Assert.assertEquals(
new Path("/foo/bar").makeQualified(FsConstants.VIEWFS_URI, null),
fcView.makeQualified(new Path("/foo/bar")));
}
/**
* Test modify operations (create, mkdir, delete, etc)
* on the mount file system where the pathname references through
   * the mount points. Hence these operations will modify the target
* file system.
*
* Verify the operation via mountfs (ie fc) and *also* via the
* target file system (ie fclocal) that the mount link points-to.
*/
@Test
public void testOperationsThroughMountLinks() throws IOException {
// Create file
fileContextTestHelper.createFileNonRecursive(fcView, "/user/foo");
Assert.assertTrue("Create file should be file",
isFile(fcView, new Path("/user/foo")));
Assert.assertTrue("Target of created file should be type file",
isFile(fcTarget, new Path(targetTestRoot,"user/foo")));
// Delete the created file
Assert.assertTrue("Delete should succeed",
fcView.delete(new Path("/user/foo"), false));
Assert.assertFalse("File should not exist after delete",
exists(fcView, new Path("/user/foo")));
Assert.assertFalse("Target File should not exist after delete",
exists(fcTarget, new Path(targetTestRoot,"user/foo")));
// Create file with a 2 component dirs
fileContextTestHelper.createFileNonRecursive(fcView,
"/internalDir/linkToDir2/foo");
Assert.assertTrue("Created file should be type file",
isFile(fcView, new Path("/internalDir/linkToDir2/foo")));
Assert.assertTrue("Target of created file should be type file",
isFile(fcTarget, new Path(targetTestRoot,"dir2/foo")));
// Delete the created file
Assert.assertTrue("Delete should suceed",
fcView.delete(new Path("/internalDir/linkToDir2/foo"),false));
Assert.assertFalse("File should not exist after deletion",
exists(fcView, new Path("/internalDir/linkToDir2/foo")));
Assert.assertFalse("Target should not exist after deletion",
exists(fcTarget, new Path(targetTestRoot,"dir2/foo")));
// Create file with a 3 component dirs
fileContextTestHelper.createFileNonRecursive(fcView,
"/internalDir/internalDir2/linkToDir3/foo");
Assert.assertTrue("Created file should be of type file",
isFile(fcView, new Path("/internalDir/internalDir2/linkToDir3/foo")));
Assert.assertTrue("Target of created file should also be type file",
isFile(fcTarget, new Path(targetTestRoot,"dir3/foo")));
// Recursive Create file with missing dirs
fileContextTestHelper.createFile(fcView,
"/internalDir/linkToDir2/missingDir/miss2/foo");
Assert.assertTrue("Created file should be of type file",
isFile(fcView, new Path("/internalDir/linkToDir2/missingDir/miss2/foo")));
Assert.assertTrue("Target of created file should also be type file",
isFile(fcTarget, new Path(targetTestRoot,"dir2/missingDir/miss2/foo")));
// Delete the created file
Assert.assertTrue("Delete should succeed", fcView.delete(
new Path("/internalDir/internalDir2/linkToDir3/foo"), false));
Assert.assertFalse("Deleted File should not exist",
exists(fcView, new Path("/internalDir/internalDir2/linkToDir3/foo")));
Assert.assertFalse("Target of deleted file should not exist",
exists(fcTarget, new Path(targetTestRoot,"dir3/foo")));
// mkdir
fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView, "/user/dirX"),
FileContext.DEFAULT_PERM, false);
Assert.assertTrue("New dir should be type dir",
isDir(fcView, new Path("/user/dirX")));
Assert.assertTrue("Target of new dir should be of type dir",
isDir(fcTarget, new Path(targetTestRoot,"user/dirX")));
fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView, "/user/dirX/dirY"),
FileContext.DEFAULT_PERM, false);
Assert.assertTrue("New dir should be type dir",
isDir(fcView, new Path("/user/dirX/dirY")));
Assert.assertTrue("Target of new dir should be of type dir",
isDir(fcTarget,new Path(targetTestRoot,"user/dirX/dirY")));
// Delete the created dir
Assert.assertTrue("Delete should succeed",
fcView.delete(new Path("/user/dirX/dirY"), false));
Assert.assertFalse("Deleted File should not exist",
exists(fcView, new Path("/user/dirX/dirY")));
Assert.assertFalse("Deleted Target should not exist",
exists(fcTarget, new Path(targetTestRoot,"user/dirX/dirY")));
Assert.assertTrue("Delete should succeed",
fcView.delete(new Path("/user/dirX"), false));
Assert.assertFalse("Deleted File should not exist",
exists(fcView, new Path("/user/dirX")));
Assert.assertFalse("Deleted Target should not exist",
exists(fcTarget, new Path(targetTestRoot,"user/dirX")));
// Rename a file
fileContextTestHelper.createFile(fcView, "/user/foo");
fcView.rename(new Path("/user/foo"), new Path("/user/fooBar"));
Assert.assertFalse("Renamed src should not exist",
exists(fcView, new Path("/user/foo")));
Assert.assertFalse(exists(fcTarget, new Path(targetTestRoot,"user/foo")));
Assert.assertTrue(isFile(fcView,
fileContextTestHelper.getTestRootPath(fcView,"/user/fooBar")));
Assert.assertTrue(isFile(fcTarget, new Path(targetTestRoot,"user/fooBar")));
fcView.mkdir(new Path("/user/dirFoo"), FileContext.DEFAULT_PERM, false);
fcView.rename(new Path("/user/dirFoo"), new Path("/user/dirFooBar"));
Assert.assertFalse("Renamed src should not exist",
exists(fcView, new Path("/user/dirFoo")));
Assert.assertFalse("Renamed src should not exist in target",
exists(fcTarget, new Path(targetTestRoot,"user/dirFoo")));
Assert.assertTrue("Renamed dest should exist as dir",
isDir(fcView,
fileContextTestHelper.getTestRootPath(fcView,"/user/dirFooBar")));
Assert.assertTrue("Renamed dest should exist as dir in target",
isDir(fcTarget,new Path(targetTestRoot,"user/dirFooBar")));
// Make a directory under a directory that's mounted from the root of another FS
fcView.mkdir(new Path("/targetRoot/dirFoo"), FileContext.DEFAULT_PERM, false);
Assert.assertTrue(exists(fcView, new Path("/targetRoot/dirFoo")));
boolean dirFooPresent = false;
RemoteIterator<FileStatus> dirContents = fcView.listStatus(new Path(
"/targetRoot/"));
while (dirContents.hasNext()) {
FileStatus fileStatus = dirContents.next();
if (fileStatus.getPath().getName().equals("dirFoo")) {
dirFooPresent = true;
}
}
Assert.assertTrue(dirFooPresent);
}
  // a rename across mount points that point to the same target also fails
@Test(expected=IOException.class)
public void testRenameAcrossMounts1() throws IOException {
fileContextTestHelper.createFile(fcView, "/user/foo");
fcView.rename(new Path("/user/foo"), new Path("/user2/fooBarBar"));
/* - code if we had wanted this to succeed
Assert.assertFalse(exists(fc, new Path("/user/foo")));
Assert.assertFalse(exists(fclocal, new Path(targetTestRoot,"user/foo")));
Assert.assertTrue(isFile(fc,
FileContextTestHelper.getTestRootPath(fc,"/user2/fooBarBar")));
Assert.assertTrue(isFile(fclocal,
new Path(targetTestRoot,"user/fooBarBar")));
*/
}
  // a rename across mount points fails if the mount link targets are different
// even if the targets are part of the same target FS
@Test(expected=IOException.class)
public void testRenameAcrossMounts2() throws IOException {
fileContextTestHelper.createFile(fcView, "/user/foo");
fcView.rename(new Path("/user/foo"), new Path("/data/fooBar"));
}
  static protected boolean SupportsBlocks = false; // local fs uses 1 block
// override for HDFS
@Test
public void testGetBlockLocations() throws IOException {
Path targetFilePath = new Path(targetTestRoot,"data/largeFile");
FileContextTestHelper.createFile(fcTarget, targetFilePath, 10, 1024);
Path viewFilePath = new Path("/data/largeFile");
checkFileStatus(fcView, viewFilePath.toString(), fileType.isFile);
BlockLocation[] viewBL = fcView.getFileBlockLocations(viewFilePath,
0, 10240+100);
Assert.assertEquals(SupportsBlocks ? 10 : 1, viewBL.length);
BlockLocation[] targetBL = fcTarget.getFileBlockLocations(targetFilePath, 0, 10240+100);
compareBLs(viewBL, targetBL);
    // Fetch the locations again and re-compare (capture the result this time
    // so the comparison below is not against the stale array)
    viewBL = fcView.getFileBlockLocations(viewFilePath, 0, 10240+100);
targetBL = fcTarget.getFileBlockLocations(targetFilePath, 0, 10240+100);
compareBLs(viewBL, targetBL);
}
void compareBLs(BlockLocation[] viewBL, BlockLocation[] targetBL) {
Assert.assertEquals(targetBL.length, viewBL.length);
int i = 0;
for (BlockLocation vbl : viewBL) {
Assert.assertEquals(vbl.toString(), targetBL[i].toString());
Assert.assertEquals(targetBL[i].getOffset(), vbl.getOffset());
Assert.assertEquals(targetBL[i].getLength(), vbl.getLength());
i++;
}
}
/**
* Test "readOps" (e.g. list, listStatus)
* on internal dirs of mount table
* These operations should succeed.
*/
// test list on internal dirs of mount table
@Test
public void testListOnInternalDirsOfMountTable() throws IOException {
// list on Slash
FileStatus[] dirPaths = fcView.util().listStatus(new Path("/"));
FileStatus fs;
Assert.assertEquals(7, dirPaths.length);
fs = fileContextTestHelper.containsPath(fcView, "/user", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
fs = fileContextTestHelper.containsPath(fcView, "/data", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
fs = fileContextTestHelper.containsPath(fcView, "/internalDir", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("InternalDirs should appear as dir", fs.isDirectory());
fs = fileContextTestHelper.containsPath(fcView, "/danglingLink", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
fs = fileContextTestHelper.containsPath(fcView, "/linkToAFile", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
// list on internal dir
dirPaths = fcView.util().listStatus(new Path("/internalDir"));
Assert.assertEquals(2, dirPaths.length);
fs = fileContextTestHelper.containsPath(fcView,
"/internalDir/internalDir2", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("InternalDirs should appear as dir",fs.isDirectory());
fs = fileContextTestHelper.containsPath(fcView,
"/internalDir/linkToDir2", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
}
@Test
public void testFileStatusOnMountLink() throws IOException {
Assert.assertTrue("Slash should appear as dir",
fcView.getFileStatus(new Path("/")).isDirectory());
checkFileStatus(fcView, "/", fileType.isDir);
checkFileStatus(fcView, "/user", fileType.isDir);
checkFileStatus(fcView, "/data", fileType.isDir);
checkFileStatus(fcView, "/internalDir", fileType.isDir);
checkFileStatus(fcView, "/internalDir/linkToDir2", fileType.isDir);
checkFileStatus(fcView, "/internalDir/internalDir2/linkToDir3", fileType.isDir);
checkFileStatus(fcView, "/linkToAFile", fileType.isFile);
try {
fcView.getFileStatus(new Path("/danglingLink"));
Assert.fail("Excepted a not found exception here");
} catch ( FileNotFoundException e) {
// as excepted
}
}
@Test
public void testGetFileChecksum() throws AccessControlException
, UnresolvedLinkException, IOException {
AbstractFileSystem mockAFS = Mockito.mock(AbstractFileSystem.class);
InodeTree.ResolveResult<AbstractFileSystem> res =
new InodeTree.ResolveResult<AbstractFileSystem>(null, mockAFS , null,
new Path("someFile"));
@SuppressWarnings("unchecked")
InodeTree<AbstractFileSystem> fsState = Mockito.mock(InodeTree.class);
Mockito.when(fsState.resolve(Mockito.anyString()
, Mockito.anyBoolean())).thenReturn(res);
ViewFs vfs = Mockito.mock(ViewFs.class);
vfs.fsState = fsState;
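    // Partial mock: only getFileChecksum runs its real code; the mocked
    // fsState resolves every path to "someFile" on mockAFS, so the verify
    // below checks that the resolved path (not the original) is delegated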
Mockito.when(vfs.getFileChecksum(new Path("/tmp/someFile")))
.thenCallRealMethod();
vfs.getFileChecksum(new Path("/tmp/someFile"));
Mockito.verify(mockAFS).getFileChecksum(new Path("someFile"));
}
@Test(expected=FileNotFoundException.class)
public void testgetFSonDanglingLink() throws IOException {
fcView.getFileStatus(new Path("/danglingLink"));
}
@Test(expected=FileNotFoundException.class)
public void testgetFSonNonExistingInternalDir() throws IOException {
fcView.getFileStatus(new Path("/internalDir/nonExisting"));
}
@Test
public void testgetFileLinkStatus() throws IOException {
checkFileLinkStatus(fcView, "/user", fileType.isSymlink);
checkFileLinkStatus(fcView, "/data", fileType.isSymlink);
checkFileLinkStatus(fcView, "/internalDir/linkToDir2", fileType.isSymlink);
checkFileLinkStatus(fcView, "/internalDir/internalDir2/linkToDir3",
fileType.isSymlink);
checkFileLinkStatus(fcView, "/linkToAFile", fileType.isSymlink);
checkFileLinkStatus(fcView, "/internalDir", fileType.isDir);
checkFileLinkStatus(fcView, "/internalDir/internalDir2", fileType.isDir);
}
@Test(expected=FileNotFoundException.class)
public void testgetFileLinkStatusonNonExistingInternalDir()
throws IOException {
fcView.getFileLinkStatus(new Path("/internalDir/nonExisting"));
}
@Test
public void testSymlinkTarget() throws IOException {
    // get link target
Assert.assertEquals(fcView.getLinkTarget(new Path("/user")),
(new Path(targetTestRoot,"user")));
Assert.assertEquals(fcView.getLinkTarget(new Path("/data")),
(new Path(targetTestRoot,"data")));
Assert.assertEquals(
fcView.getLinkTarget(new Path("/internalDir/linkToDir2")),
(new Path(targetTestRoot,"dir2")));
Assert.assertEquals(
fcView.getLinkTarget(new Path("/internalDir/internalDir2/linkToDir3")),
(new Path(targetTestRoot,"dir3")));
Assert.assertEquals(fcView.getLinkTarget(new Path("/linkToAFile")),
(new Path(targetTestRoot,"aFile")));
}
@Test(expected=IOException.class)
public void testgetLinkTargetOnNonLink() throws IOException {
fcView.getLinkTarget(new Path("/internalDir/internalDir2"));
}
/*
* Test resolvePath(p)
* TODO In the tests below replace
   * fcView.getDefaultFileSystem().resolvePath() with fcView.resolvePath()
*/
@Test
public void testResolvePathInternalPaths() throws IOException {
Assert.assertEquals(new Path("/"), fcView.resolvePath(new Path("/")));
Assert.assertEquals(new Path("/internalDir"),
fcView.resolvePath(new Path("/internalDir")));
}
@Test
public void testResolvePathMountPoints() throws IOException {
Assert.assertEquals(new Path(targetTestRoot,"user"),
fcView.resolvePath(new Path("/user")));
Assert.assertEquals(new Path(targetTestRoot,"data"),
fcView.resolvePath(new Path("/data")));
Assert.assertEquals(new Path(targetTestRoot,"dir2"),
fcView.resolvePath(new Path("/internalDir/linkToDir2")));
Assert.assertEquals(new Path(targetTestRoot,"dir3"),
fcView.resolvePath(new Path("/internalDir/internalDir2/linkToDir3")));
}
@Test
public void testResolvePathThroughMountPoints() throws IOException {
fileContextTestHelper.createFile(fcView, "/user/foo");
Assert.assertEquals(new Path(targetTestRoot,"user/foo"),
fcView.resolvePath(new Path("/user/foo")));
fcView.mkdir(
fileContextTestHelper.getTestRootPath(fcView, "/user/dirX"),
FileContext.DEFAULT_PERM, false);
Assert.assertEquals(new Path(targetTestRoot,"user/dirX"),
fcView.resolvePath(new Path("/user/dirX")));
fcView.mkdir(
fileContextTestHelper.getTestRootPath(fcView, "/user/dirX/dirY"),
FileContext.DEFAULT_PERM, false);
Assert.assertEquals(new Path(targetTestRoot,"user/dirX/dirY"),
fcView.resolvePath(new Path("/user/dirX/dirY")));
}
@Test(expected=FileNotFoundException.class)
public void testResolvePathDanglingLink() throws IOException {
fcView.resolvePath(new Path("/danglingLink"));
}
@Test(expected=FileNotFoundException.class)
public void testResolvePathMissingThroughMountPoints() throws IOException {
fcView.resolvePath(new Path("/user/nonExisting"));
}
@Test(expected=FileNotFoundException.class)
public void testResolvePathMissingThroughMountPoints2() throws IOException {
fcView.mkdir(
fileContextTestHelper.getTestRootPath(fcView, "/user/dirX"),
FileContext.DEFAULT_PERM, false);
fcView.resolvePath(new Path("/user/dirX/nonExisting"));
}
/**
* Test modify operations (create, mkdir, rename, etc)
* on internal dirs of mount table
* These operations should fail since the mount table is read-only or
* because the internal dir that it is trying to create already
   * exists.
*/
// Mkdir on internal mount table should fail
@Test(expected=AccessControlException.class)
public void testInternalMkdirSlash() throws IOException {
fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView, "/"),
FileContext.DEFAULT_PERM, false);
}
@Test(expected=AccessControlException.class)
public void testInternalMkdirExisting1() throws IOException {
fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView, "/internalDir"),
FileContext.DEFAULT_PERM, false);
}
@Test(expected=AccessControlException.class)
public void testInternalMkdirExisting2() throws IOException {
fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView,
"/internalDir/linkToDir2"),
FileContext.DEFAULT_PERM, false);
}
@Test(expected=AccessControlException.class)
public void testInternalMkdirNew() throws IOException {
fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView, "/dirNew"),
FileContext.DEFAULT_PERM, false);
}
@Test(expected=AccessControlException.class)
public void testInternalMkdirNew2() throws IOException {
fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView, "/internalDir/dirNew"),
FileContext.DEFAULT_PERM, false);
}
// Create on internal mount table should fail
@Test(expected=AccessControlException.class)
public void testInternalCreate1() throws IOException {
fileContextTestHelper.createFileNonRecursive(fcView, "/foo"); // 1 component
}
@Test(expected=AccessControlException.class)
public void testInternalCreate2() throws IOException { // 2 component
fileContextTestHelper.createFileNonRecursive(fcView, "/internalDir/foo");
}
@Test(expected=AccessControlException.class)
public void testInternalCreateMissingDir() throws IOException {
fileContextTestHelper.createFile(fcView, "/missingDir/foo");
}
@Test(expected=AccessControlException.class)
public void testInternalCreateMissingDir2() throws IOException {
fileContextTestHelper.createFile(fcView, "/missingDir/miss2/foo");
}
@Test(expected=AccessControlException.class)
public void testInternalCreateMissingDir3() throws IOException {
fileContextTestHelper.createFile(fcView, "/internalDir/miss2/foo");
}
// Delete on internal mount table should fail
@Test(expected=FileNotFoundException.class)
public void testInternalDeleteNonExisting() throws IOException {
fcView.delete(new Path("/NonExisting"), false);
}
@Test(expected=FileNotFoundException.class)
public void testInternalDeleteNonExisting2() throws IOException {
fcView.delete(new Path("/internalDir/NonExisting"), false);
}
@Test(expected=AccessControlException.class)
public void testInternalDeleteExisting() throws IOException {
fcView.delete(new Path("/internalDir"), false);
}
@Test(expected=AccessControlException.class)
public void testInternalDeleteExisting2() throws IOException {
Assert.assertTrue("Delete of link to dir should succeed",
fcView.getFileStatus(new Path("/internalDir/linkToDir2")).isDirectory());
fcView.delete(new Path("/internalDir/linkToDir2"), false);
}
// Rename on internal mount table should fail
@Test(expected=AccessControlException.class)
public void testInternalRename1() throws IOException {
fcView.rename(new Path("/internalDir"), new Path("/newDir"));
}
@Test(expected=AccessControlException.class)
public void testInternalRename2() throws IOException {
Assert.assertTrue("linkTODir2 should be a dir",
fcView.getFileStatus(new Path("/internalDir/linkToDir2")).isDirectory());
fcView.rename(new Path("/internalDir/linkToDir2"),
new Path("/internalDir/dir1"));
}
@Test(expected=AccessControlException.class)
public void testInternalRename3() throws IOException {
fcView.rename(new Path("/user"), new Path("/internalDir/linkToDir2"));
}
@Test(expected=AccessControlException.class)
public void testInternalRenameToSlash() throws IOException {
fcView.rename(new Path("/internalDir/linkToDir2/foo"), new Path("/"));
}
@Test(expected=AccessControlException.class)
public void testInternalRenameFromSlash() throws IOException {
fcView.rename(new Path("/"), new Path("/bar"));
}
@Test(expected=AccessControlException.class)
public void testInternalSetOwner() throws IOException {
fcView.setOwner(new Path("/internalDir"), "foo", "bar");
}
/**
* Verify the behavior of ACL operations on paths above the root of
* any mount table entry.
*/
@Test(expected=AccessControlException.class)
public void testInternalModifyAclEntries() throws IOException {
fcView.modifyAclEntries(new Path("/internalDir"),
new ArrayList<AclEntry>());
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveAclEntries() throws IOException {
fcView.removeAclEntries(new Path("/internalDir"),
new ArrayList<AclEntry>());
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveDefaultAcl() throws IOException {
fcView.removeDefaultAcl(new Path("/internalDir"));
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveAcl() throws IOException {
fcView.removeAcl(new Path("/internalDir"));
}
@Test(expected=AccessControlException.class)
public void testInternalSetAcl() throws IOException {
fcView.setAcl(new Path("/internalDir"), new ArrayList<AclEntry>());
}
@Test
public void testInternalGetAclStatus() throws IOException {
final UserGroupInformation currentUser =
UserGroupInformation.getCurrentUser();
AclStatus aclStatus = fcView.getAclStatus(new Path("/internalDir"));
assertEquals(aclStatus.getOwner(), currentUser.getUserName());
assertEquals(aclStatus.getGroup(), currentUser.getGroupNames()[0]);
assertEquals(aclStatus.getEntries(),
AclUtil.getMinimalAcl(PERMISSION_555));
assertFalse(aclStatus.isStickyBit());
}
@Test(expected=AccessControlException.class)
public void testInternalSetXAttr() throws IOException {
fcView.setXAttr(new Path("/internalDir"), "xattrName", null);
}
@Test(expected=NotInMountpointException.class)
public void testInternalGetXAttr() throws IOException {
fcView.getXAttr(new Path("/internalDir"), "xattrName");
}
@Test(expected=NotInMountpointException.class)
public void testInternalGetXAttrs() throws IOException {
fcView.getXAttrs(new Path("/internalDir"));
}
@Test(expected=NotInMountpointException.class)
public void testInternalGetXAttrsWithNames() throws IOException {
fcView.getXAttrs(new Path("/internalDir"), new ArrayList<String>());
}
@Test(expected=NotInMountpointException.class)
public void testInternalListXAttr() throws IOException {
fcView.listXAttrs(new Path("/internalDir"));
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveXAttr() throws IOException {
fcView.removeXAttr(new Path("/internalDir"), "xattrName");
}
@Test(expected = AccessControlException.class)
public void testInternalCreateSnapshot1() throws IOException {
fcView.createSnapshot(new Path("/internalDir"));
}
@Test(expected = AccessControlException.class)
public void testInternalCreateSnapshot2() throws IOException {
fcView.createSnapshot(new Path("/internalDir"), "snap1");
}
@Test(expected = AccessControlException.class)
public void testInternalRenameSnapshot() throws IOException {
fcView.renameSnapshot(new Path("/internalDir"), "snapOldName",
"snapNewName");
}
@Test(expected = AccessControlException.class)
public void testInternalDeleteSnapshot() throws IOException {
fcView.deleteSnapshot(new Path("/internalDir"), "snap1");
}
}
| 32,445 | 39.456359 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcCreateMkdirLocalFs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import org.apache.hadoop.fs.FileContextCreateMkdirBaseTest;
import org.junit.After;
import org.junit.Before;
public class TestFcCreateMkdirLocalFs extends
FileContextCreateMkdirBaseTest {
@Override
@Before
public void setUp() throws Exception {
fc = ViewFsTestSetup.setupForViewFsLocalFs(fileContextTestHelper);
super.setUp();
}
@Override
@After
public void tearDown() throws Exception {
super.tearDown();
ViewFsTestSetup.tearDownForViewFsLocalFs(fileContextTestHelper);
}
}
| 1,364 | 30.022727 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.EnumSet;
import static org.apache.hadoop.fs.FileContextTestHelper.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestHelper;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ChRootedFs;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
public class TestChRootedFs {
FileContextTestHelper fileContextTestHelper = new FileContextTestHelper();
  FileContext fc; // the ChRootedFs under test
  FileContext fcTarget; // the target file system that fc is chrooted to
Path chrootedTo;
@Before
public void setUp() throws Exception {
// create the test root on local_fs
fcTarget = FileContext.getLocalFSFileContext();
chrootedTo = fileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
// In case previous test was killed before cleanup
fcTarget.delete(chrootedTo, true);
fcTarget.mkdir(chrootedTo, FileContext.DEFAULT_PERM, true);
Configuration conf = new Configuration();
// ChRoot to the root of the testDirectory
fc = FileContext.getFileContext(
new ChRootedFs(fcTarget.getDefaultFileSystem(), chrootedTo), conf);
}
@After
public void tearDown() throws Exception {
fcTarget.delete(chrootedTo, true);
}
@Test
public void testBasicPaths() {
URI uri = fc.getDefaultFileSystem().getUri();
Assert.assertEquals(chrootedTo.toUri(), uri);
Assert.assertEquals(fc.makeQualified(
new Path(System.getProperty("user.home"))),
fc.getWorkingDirectory());
Assert.assertEquals(fc.makeQualified(
new Path(System.getProperty("user.home"))),
fc.getHomeDirectory());
    /*
     * ChRootedFs has its URI like file:///chrootRoot.
     * This is questionable since Path#makeQualified(URI, Path) ignores
     * the path part of a URI. So our notion of a chrooted URI is
     * questionable. But if we were to fix Path#makeQualified() then the
     * next test should have been (see also the illustrative sketch right
     * after this test):
    Assert.assertEquals(
        new Path(chrootedTo + "/foo/bar").makeQualified(
            FsConstants.LOCAL_FS_URI, null),
        fc.makeQualified(new Path( "/foo/bar")));
     */
Assert.assertEquals(
new Path("/foo/bar").makeQualified(FsConstants.LOCAL_FS_URI, null),
fc.makeQualified(new Path("/foo/bar")));
}
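  /*
   * Illustrative sketch (an addition, not part of the original suite): as
   * the comment above notes, Path#makeQualified(URI, Path) keeps only the
   * scheme and authority of the URI, so qualifying against the chrooted URI
   * and against the plain local fs URI must give the same result.
   */
  @Test
  public void testMakeQualifiedIgnoresUriPathPart() {
    URI chrootUri = fc.getDefaultFileSystem().getUri();
    Assert.assertEquals(
        new Path("/foo/bar").makeQualified(FsConstants.LOCAL_FS_URI, null),
        new Path("/foo/bar").makeQualified(chrootUri, null));
  }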
  /**
   * Test modify operations (create, mkdir, delete, etc).
   *
   * Verify each operation via the chrooted fs (i.e. fc) and *also* via the
   * target file system (i.e. fcTarget) that has been chrooted; a helper
   * sketch of this dual check follows testCreateDelete below.
   */
@Test
public void testCreateDelete() throws IOException {
// Create file
fileContextTestHelper.createFileNonRecursive(fc, "/foo");
Assert.assertTrue(isFile(fc, new Path("/foo")));
Assert.assertTrue(isFile(fcTarget, new Path(chrootedTo, "foo")));
// Create file with recursive dir
fileContextTestHelper.createFile(fc, "/newDir/foo");
Assert.assertTrue(isFile(fc, new Path("/newDir/foo")));
Assert.assertTrue(isFile(fcTarget, new Path(chrootedTo,"newDir/foo")));
// Delete the created file
Assert.assertTrue(fc.delete(new Path("/newDir/foo"), false));
Assert.assertFalse(exists(fc, new Path("/newDir/foo")));
Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"newDir/foo")));
// Create file with a 2 component dirs recursively
fileContextTestHelper.createFile(fc, "/newDir/newDir2/foo");
Assert.assertTrue(isFile(fc, new Path("/newDir/newDir2/foo")));
Assert.assertTrue(isFile(fcTarget, new Path(chrootedTo,"newDir/newDir2/foo")));
// Delete the created file
Assert.assertTrue(fc.delete(new Path("/newDir/newDir2/foo"), false));
Assert.assertFalse(exists(fc, new Path("/newDir/newDir2/foo")));
Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"newDir/newDir2/foo")));
}
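  /*
   * Minimal helper sketch (an addition, assuming the conventions above): a
   * path created via the chrooted context should also be visible under
   * chrootedTo on the target fs; the leading '/' is stripped when
   * re-rooting the path on the target.
   */
  private void assertExistsInBothViews(String path) throws IOException {
    Assert.assertTrue(exists(fc, new Path(path)));
    Assert.assertTrue(
        exists(fcTarget, new Path(chrootedTo, path.substring(1))));
  }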
@Test
public void testMkdirDelete() throws IOException {
fc.mkdir(fileContextTestHelper.getTestRootPath(fc, "/dirX"), FileContext.DEFAULT_PERM, false);
Assert.assertTrue(isDir(fc, new Path("/dirX")));
Assert.assertTrue(isDir(fcTarget, new Path(chrootedTo,"dirX")));
fc.mkdir(fileContextTestHelper.getTestRootPath(fc, "/dirX/dirY"), FileContext.DEFAULT_PERM, false);
Assert.assertTrue(isDir(fc, new Path("/dirX/dirY")));
Assert.assertTrue(isDir(fcTarget, new Path(chrootedTo,"dirX/dirY")));
// Delete the created dir
Assert.assertTrue(fc.delete(new Path("/dirX/dirY"), false));
Assert.assertFalse(exists(fc, new Path("/dirX/dirY")));
Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"dirX/dirY")));
Assert.assertTrue(fc.delete(new Path("/dirX"), false));
Assert.assertFalse(exists(fc, new Path("/dirX")));
Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"dirX")));
}
@Test
public void testRename() throws IOException {
// Rename a file
fileContextTestHelper.createFile(fc, "/newDir/foo");
fc.rename(new Path("/newDir/foo"), new Path("/newDir/fooBar"));
Assert.assertFalse(exists(fc, new Path("/newDir/foo")));
Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"newDir/foo")));
Assert.assertTrue(isFile(fc, fileContextTestHelper.getTestRootPath(fc,"/newDir/fooBar")));
Assert.assertTrue(isFile(fcTarget, new Path(chrootedTo,"newDir/fooBar")));
// Rename a dir
fc.mkdir(new Path("/newDir/dirFoo"), FileContext.DEFAULT_PERM, false);
fc.rename(new Path("/newDir/dirFoo"), new Path("/newDir/dirFooBar"));
Assert.assertFalse(exists(fc, new Path("/newDir/dirFoo")));
Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"newDir/dirFoo")));
Assert.assertTrue(isDir(fc, fileContextTestHelper.getTestRootPath(fc,"/newDir/dirFooBar")));
Assert.assertTrue(isDir(fcTarget, new Path(chrootedTo,"newDir/dirFooBar")));
}
  /**
   * We would have liked renames across file systems to fail, but
   * unfortunately there is no way to distinguish the two file systems.
   * @throws IOException
   */
@Test
public void testRenameAcrossFs() throws IOException {
fc.mkdir(new Path("/newDir/dirFoo"), FileContext.DEFAULT_PERM, true);
// the root will get interpreted to the root of the chrooted fs.
fc.rename(new Path("/newDir/dirFoo"), new Path("file:///dirFooBar"));
    Assert.assertTrue(FileContextTestHelper.isDir(fc, new Path("/dirFooBar")));
}
@Test
public void testList() throws IOException {
FileStatus fs = fc.getFileStatus(new Path("/"));
Assert.assertTrue(fs.isDirectory());
// should return the full path not the chrooted path
Assert.assertEquals(fs.getPath(), chrootedTo);
// list on Slash
FileStatus[] dirPaths = fc.util().listStatus(new Path("/"));
Assert.assertEquals(0, dirPaths.length);
fileContextTestHelper.createFileNonRecursive(fc, "/foo");
fileContextTestHelper.createFileNonRecursive(fc, "/bar");
fc.mkdir(new Path("/dirX"), FileContext.DEFAULT_PERM, false);
fc.mkdir(fileContextTestHelper.getTestRootPath(fc, "/dirY"),
FileContext.DEFAULT_PERM, false);
fc.mkdir(new Path("/dirX/dirXX"), FileContext.DEFAULT_PERM, false);
dirPaths = fc.util().listStatus(new Path("/"));
Assert.assertEquals(4, dirPaths.length);
    // Note that the file status paths are the full paths on the target
fs = fileContextTestHelper.containsPath(fcTarget, "foo", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isFile());
fs = fileContextTestHelper.containsPath(fcTarget, "bar", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isFile());
fs = fileContextTestHelper.containsPath(fcTarget, "dirX", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isDirectory());
fs = fileContextTestHelper.containsPath(fcTarget, "dirY", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isDirectory());
}
@Test
public void testWorkingDirectory() throws Exception {
// First we cd to our test root
fc.mkdir(new Path("/testWd"), FileContext.DEFAULT_PERM, false);
Path workDir = new Path("/testWd");
Path fqWd = fc.makeQualified(workDir);
fc.setWorkingDirectory(workDir);
Assert.assertEquals(fqWd, fc.getWorkingDirectory());
fc.setWorkingDirectory(new Path("."));
Assert.assertEquals(fqWd, fc.getWorkingDirectory());
fc.setWorkingDirectory(new Path(".."));
Assert.assertEquals(fqWd.getParent(), fc.getWorkingDirectory());
// cd using a relative path
// Go back to our test root
workDir = new Path("/testWd");
fqWd = fc.makeQualified(workDir);
fc.setWorkingDirectory(workDir);
Assert.assertEquals(fqWd, fc.getWorkingDirectory());
Path relativeDir = new Path("existingDir1");
Path absoluteDir = new Path(workDir,"existingDir1");
fc.mkdir(absoluteDir, FileContext.DEFAULT_PERM, true);
Path fqAbsoluteDir = fc.makeQualified(absoluteDir);
fc.setWorkingDirectory(relativeDir);
Assert.assertEquals(fqAbsoluteDir, fc.getWorkingDirectory());
// cd using a absolute path
absoluteDir = new Path("/test/existingDir2");
fqAbsoluteDir = fc.makeQualified(absoluteDir);
fc.mkdir(absoluteDir, FileContext.DEFAULT_PERM, true);
fc.setWorkingDirectory(absoluteDir);
Assert.assertEquals(fqAbsoluteDir, fc.getWorkingDirectory());
// Now open a file relative to the wd we just set above.
Path absolutePath = new Path(absoluteDir, "foo");
fc.create(absolutePath, EnumSet.of(CreateFlag.CREATE)).close();
fc.open(new Path("foo")).close();
// Now mkdir relative to the dir we cd'ed to
fc.mkdir(new Path("newDir"), FileContext.DEFAULT_PERM, true);
Assert.assertTrue(isDir(fc, new Path(absoluteDir, "newDir")));
absoluteDir = fileContextTestHelper.getTestRootPath(fc, "nonexistingPath");
try {
fc.setWorkingDirectory(absoluteDir);
Assert.fail("cd to non existing dir should have failed");
} catch (Exception e) {
// Exception as expected
}
// Try a URI
final String LOCAL_FS_ROOT_URI = "file:///tmp/test";
absoluteDir = new Path(LOCAL_FS_ROOT_URI + "/existingDir");
fc.mkdir(absoluteDir, FileContext.DEFAULT_PERM, true);
fc.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir, fc.getWorkingDirectory());
}
/*
* Test resolvePath(p)
*/
@Test
public void testResolvePath() throws IOException {
Assert.assertEquals(chrootedTo, fc.getDefaultFileSystem().resolvePath(new Path("/")));
fileContextTestHelper.createFile(fc, "/foo");
Assert.assertEquals(new Path(chrootedTo, "foo"),
fc.getDefaultFileSystem().resolvePath(new Path("/foo")));
}
@Test(expected=FileNotFoundException.class)
public void testResolvePathNonExisting() throws IOException {
fc.getDefaultFileSystem().resolvePath(new Path("/nonExisting"));
}
@Test
public void testIsValidNameValidInBaseFs() throws Exception {
AbstractFileSystem baseFs = Mockito.spy(fc.getDefaultFileSystem());
ChRootedFs chRootedFs = new ChRootedFs(baseFs, new Path("/chroot"));
Mockito.doReturn(true).when(baseFs).isValidName(Mockito.anyString());
Assert.assertTrue(chRootedFs.isValidName("/test"));
Mockito.verify(baseFs).isValidName("/chroot/test");
}
@Test
public void testIsValidNameInvalidInBaseFs() throws Exception {
AbstractFileSystem baseFs = Mockito.spy(fc.getDefaultFileSystem());
ChRootedFs chRootedFs = new ChRootedFs(baseFs, new Path("/chroot"));
Mockito.doReturn(false).when(baseFs).isValidName(Mockito.anyString());
Assert.assertFalse(chRootedFs.isValidName("/test"));
Mockito.verify(baseFs).isValidName("/chroot/test");
}
@Test(timeout = 30000)
public void testCreateSnapshot() throws Exception {
Path snapRootPath = new Path("/snapPath");
Path chRootedSnapRootPath = new Path(
Path.getPathWithoutSchemeAndAuthority(chrootedTo), "snapPath");
AbstractFileSystem baseFs = Mockito.spy(fc.getDefaultFileSystem());
ChRootedFs chRootedFs = new ChRootedFs(baseFs, chrootedTo);
Mockito.doReturn(snapRootPath).when(baseFs)
.createSnapshot(chRootedSnapRootPath, "snap1");
Assert.assertEquals(snapRootPath,
chRootedFs.createSnapshot(snapRootPath, "snap1"));
Mockito.verify(baseFs).createSnapshot(chRootedSnapRootPath, "snap1");
}
@Test(timeout = 30000)
public void testDeleteSnapshot() throws Exception {
Path snapRootPath = new Path("/snapPath");
Path chRootedSnapRootPath = new Path(
Path.getPathWithoutSchemeAndAuthority(chrootedTo), "snapPath");
AbstractFileSystem baseFs = Mockito.spy(fc.getDefaultFileSystem());
ChRootedFs chRootedFs = new ChRootedFs(baseFs, chrootedTo);
Mockito.doNothing().when(baseFs)
.deleteSnapshot(chRootedSnapRootPath, "snap1");
chRootedFs.deleteSnapshot(snapRootPath, "snap1");
Mockito.verify(baseFs).deleteSnapshot(chRootedSnapRootPath, "snap1");
}
@Test(timeout = 30000)
public void testRenameSnapshot() throws Exception {
Path snapRootPath = new Path("/snapPath");
Path chRootedSnapRootPath = new Path(
Path.getPathWithoutSchemeAndAuthority(chrootedTo), "snapPath");
AbstractFileSystem baseFs = Mockito.spy(fc.getDefaultFileSystem());
ChRootedFs chRootedFs = new ChRootedFs(baseFs, chrootedTo);
Mockito.doNothing().when(baseFs)
.renameSnapshot(chRootedSnapRootPath, "snapOldName", "snapNewName");
chRootedFs.renameSnapshot(snapRootPath, "snapOldName", "snapNewName");
Mockito.verify(baseFs).renameSnapshot(chRootedSnapRootPath, "snapOldName",
"snapNewName");
}
}
| 14,760 | 38.680108 | 103 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsURIs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FsConstants;
import org.junit.Test;
public class TestViewFsURIs {
@Test
public void testURIEmptyPath() throws Exception {
Configuration conf = new Configuration();
ConfigUtil.addLink(conf, "/user", new URI("file://foo"));
FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
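    // No assertion needed: the test passes as long as building the viewfs
    // FileContext over a link target with an empty path does not throw.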
}
}
| 1,285 | 34.722222 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import java.net.URI;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
*
* Test the ViewFsBaseTest using a viewfs with authority:
* viewfs://mountTableName/
 * i.e. the authority is used to load a mount table.
* The authority name used is "default"
*
*/
public class TestViewFsWithAuthorityLocalFs extends ViewFsBaseTest {
URI schemeWithAuthority;
@Override
@Before
public void setUp() throws Exception {
// create the test root on local_fs
fcTarget = FileContext.getLocalFSFileContext();
super.setUp(); // this sets up conf (and fcView which we replace)
// Now create a viewfs using a mount table called "default"
// hence viewfs://default/
schemeWithAuthority =
new URI(FsConstants.VIEWFS_SCHEME, "default", "/", null, null);
fcView = FileContext.getFileContext(schemeWithAuthority, conf);
}
@Override
@After
public void tearDown() throws Exception {
super.tearDown();
}
@Override
@Test
public void testBasicPaths() {
Assert.assertEquals(schemeWithAuthority,
fcView.getDefaultFileSystem().getUri());
Assert.assertEquals(fcView.makeQualified(
new Path("/user/" + System.getProperty("user.name"))),
fcView.getWorkingDirectory());
Assert.assertEquals(fcView.makeQualified(
new Path("/user/" + System.getProperty("user.name"))),
fcView.getHomeDirectory());
Assert.assertEquals(
new Path("/foo/bar").makeQualified(schemeWithAuthority, null),
fcView.makeQualified(new Path("/foo/bar")));
}
}
| 2,591 | 31.4 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import java.io.PrintStream;
import java.io.IOException;
import java.net.URI;
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.shell.CommandFormat.NotEnoughArgumentsException;
import org.junit.Test;
import org.junit.Before;
import org.junit.BeforeClass;
/**
* JUnit test class for {@link org.apache.hadoop.fs.shell.Count}
*
*/
public class TestCount {
private static final String WITH_QUOTAS = "Content summary with quotas";
private static final String NO_QUOTAS = "Content summary without quotas";
private static final String HUMAN = "human: ";
private static final String BYTES = "bytes: ";
private static Configuration conf;
private static FileSystem mockFs;
private static FileStatus fileStat;
@BeforeClass
public static void setup() {
conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
mockFs = mock(FileSystem.class);
fileStat = mock(FileStatus.class);
when(fileStat.isFile()).thenReturn(true);
}
@Before
public void resetMock() {
reset(mockFs);
}
@Test
public void processOptionsHumanReadable() {
LinkedList<String> options = new LinkedList<String>();
options.add("-h");
options.add("dummy");
Count count = new Count();
count.processOptions(options);
assertFalse(count.isShowQuotas());
assertTrue(count.isHumanReadable());
}
@Test
public void processOptionsAll() {
LinkedList<String> options = new LinkedList<String>();
options.add("-q");
options.add("-h");
options.add("-t");
options.add("SSD");
options.add("dummy");
Count count = new Count();
count.processOptions(options);
assertTrue(count.isShowQuotas());
assertTrue(count.isHumanReadable());
assertTrue(count.isShowQuotabyType());
assertEquals(1, count.getStorageTypes().size());
assertEquals(StorageType.SSD, count.getStorageTypes().get(0));
}
  // check that the no-options case is handled correctly
@Test
public void processOptionsNoOptions() {
LinkedList<String> options = new LinkedList<String>();
options.add("dummy");
Count count = new Count();
count.processOptions(options);
assertFalse(count.isShowQuotas());
}
// check -q is handled correctly
@Test
public void processOptionsShowQuotas() {
LinkedList<String> options = new LinkedList<String>();
options.add("-q");
options.add("dummy");
Count count = new Count();
count.processOptions(options);
assertTrue(count.isShowQuotas());
}
  // check that missing arguments are handled correctly
@Test
public void processOptionsMissingArgs() {
LinkedList<String> options = new LinkedList<String>();
Count count = new Count();
try {
count.processOptions(options);
fail("Count.processOptions - NotEnoughArgumentsException not thrown");
} catch (NotEnoughArgumentsException e) {
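      // expected: no path argument was supplied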
}
assertFalse(count.isShowQuotas());
}
// check the correct header is produced with no quotas (-v)
@Test
public void processOptionsHeaderNoQuotas() {
LinkedList<String> options = new LinkedList<String>();
options.add("-v");
options.add("dummy");
PrintStream out = mock(PrintStream.class);
Count count = new Count();
count.out = out;
count.processOptions(options);
String noQuotasHeader =
// <----12----> <----12----> <-------18------->
" DIR_COUNT FILE_COUNT CONTENT_SIZE PATHNAME";
verify(out).println(noQuotasHeader);
verifyNoMoreInteractions(out);
}
// check the correct header is produced with quotas (-q -v)
@Test
public void processOptionsHeaderWithQuotas() {
LinkedList<String> options = new LinkedList<String>();
options.add("-q");
options.add("-v");
options.add("dummy");
PrintStream out = mock(PrintStream.class);
Count count = new Count();
count.out = out;
count.processOptions(options);
String withQuotasHeader =
// <----12----> <-----15------> <-----15------> <-----15------>
" QUOTA REM_QUOTA SPACE_QUOTA REM_SPACE_QUOTA " +
// <----12----> <----12----> <-------18------->
" DIR_COUNT FILE_COUNT CONTENT_SIZE PATHNAME";
verify(out).println(withQuotasHeader);
verifyNoMoreInteractions(out);
}
// check quotas are reported correctly
@Test
public void processPathShowQuotas() throws Exception {
Path path = new Path("mockfs:/test");
when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
PathData pathData = new PathData(path.toString(), conf);
PrintStream out = mock(PrintStream.class);
Count count = new Count();
count.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-q");
options.add("dummy");
count.processOptions(options);
count.processPath(pathData);
verify(out).println(BYTES + WITH_QUOTAS + path.toString());
verifyNoMoreInteractions(out);
}
// check counts without quotas are reported correctly
@Test
public void processPathNoQuotas() throws Exception {
Path path = new Path("mockfs:/test");
when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
PathData pathData = new PathData(path.toString(), conf);
PrintStream out = mock(PrintStream.class);
Count count = new Count();
count.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("dummy");
count.processOptions(options);
count.processPath(pathData);
verify(out).println(BYTES + NO_QUOTAS + path.toString());
verifyNoMoreInteractions(out);
}
@Test
public void processPathShowQuotasHuman() throws Exception {
Path path = new Path("mockfs:/test");
when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
PathData pathData = new PathData(path.toString(), conf);
PrintStream out = mock(PrintStream.class);
Count count = new Count();
count.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-q");
options.add("-h");
options.add("dummy");
count.processOptions(options);
count.processPath(pathData);
verify(out).println(HUMAN + WITH_QUOTAS + path.toString());
}
@Test
public void processPathNoQuotasHuman() throws Exception {
Path path = new Path("mockfs:/test");
when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
PathData pathData = new PathData(path.toString(), conf);
PrintStream out = mock(PrintStream.class);
Count count = new Count();
count.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-h");
options.add("dummy");
count.processOptions(options);
count.processPath(pathData);
verify(out).println(HUMAN + NO_QUOTAS + path.toString());
}
@Test
public void processPathWithQuotasByStorageTypesHeader() throws Exception {
Path path = new Path("mockfs:/test");
when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
PrintStream out = mock(PrintStream.class);
Count count = new Count();
count.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-q");
options.add("-v");
options.add("-t");
options.add("all");
options.add("dummy");
count.processOptions(options);
String withStorageTypeHeader =
// <----13---> <-------17------> <----13-----> <------17------->
" SSD_QUOTA REM_SSD_QUOTA DISK_QUOTA REM_DISK_QUOTA " +
// <----13---> <-------17------>
"ARCHIVE_QUOTA REM_ARCHIVE_QUOTA " +
"PATHNAME";
verify(out).println(withStorageTypeHeader);
verifyNoMoreInteractions(out);
}
@Test
public void processPathWithQuotasBySSDStorageTypesHeader() throws Exception {
Path path = new Path("mockfs:/test");
when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
PrintStream out = mock(PrintStream.class);
Count count = new Count();
count.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-q");
options.add("-v");
options.add("-t");
options.add("SSD");
options.add("dummy");
count.processOptions(options);
String withStorageTypeHeader =
// <----13---> <-------17------>
" SSD_QUOTA REM_SSD_QUOTA " +
"PATHNAME";
verify(out).println(withStorageTypeHeader);
verifyNoMoreInteractions(out);
}
@Test
public void processPathWithQuotasByQTVH() throws Exception {
Path path = new Path("mockfs:/test");
when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
PrintStream out = mock(PrintStream.class);
Count count = new Count();
count.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-q");
options.add("-t");
options.add("-v");
options.add("-h");
options.add("dummy");
count.processOptions(options);
String withStorageTypeHeader =
// <----13---> <-------17------>
" SSD_QUOTA REM_SSD_QUOTA " +
" DISK_QUOTA REM_DISK_QUOTA " +
"ARCHIVE_QUOTA REM_ARCHIVE_QUOTA " +
"PATHNAME";
verify(out).println(withStorageTypeHeader);
verifyNoMoreInteractions(out);
}
@Test
public void processPathWithQuotasByMultipleStorageTypesContent() throws Exception {
Path path = new Path("mockfs:/test");
when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
PathData pathData = new PathData(path.toString(), conf);
PrintStream out = mock(PrintStream.class);
Count count = new Count();
count.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-q");
options.add("-t");
options.add("SSD,DISK");
options.add("dummy");
count.processOptions(options);
count.processPath(pathData);
String withStorageType = BYTES + StorageType.SSD.toString()
+ " " + StorageType.DISK.toString() + " " + pathData.toString();
verify(out).println(withStorageType);
verifyNoMoreInteractions(out);
}
@Test
public void processPathWithQuotasByMultipleStorageTypes() throws Exception {
Path path = new Path("mockfs:/test");
when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
PrintStream out = mock(PrintStream.class);
Count count = new Count();
count.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-q");
options.add("-v");
options.add("-t");
options.add("SSD,DISK");
options.add("dummy");
count.processOptions(options);
String withStorageTypeHeader =
// <----13---> <------17------->
" SSD_QUOTA REM_SSD_QUOTA " +
" DISK_QUOTA REM_DISK_QUOTA " +
"PATHNAME";
verify(out).println(withStorageTypeHeader);
verifyNoMoreInteractions(out);
}
@Test
public void getCommandName() {
Count count = new Count();
String actual = count.getCommandName();
String expected = "count";
assertEquals("Count.getCommandName", expected, actual);
}
@Test
public void isDeprecated() {
Count count = new Count();
boolean actual = count.isDeprecated();
boolean expected = false;
assertEquals("Count.isDeprecated", expected, actual);
}
@Test
public void getReplacementCommand() {
Count count = new Count();
String actual = count.getReplacementCommand();
String expected = null;
assertEquals("Count.getReplacementCommand", expected, actual);
}
@Test
public void getName() {
Count count = new Count();
String actual = count.getName();
String expected = "count";
assertEquals("Count.getName", expected, actual);
}
@Test
public void getUsage() {
Count count = new Count();
String actual = count.getUsage();
String expected = "-count [-q] [-h] [-v] [-t [<storage type>]] <path> ...";
assertEquals("Count.getUsage", expected, actual);
}
// check the correct description is returned
@Test
public void getDescription() {
Count count = new Count();
String actual = count.getDescription();
String expected =
"Count the number of directories, files and bytes under the paths\n"
+ "that match the specified file pattern. The output columns are:\n"
+ "DIR_COUNT FILE_COUNT CONTENT_SIZE PATHNAME\n"
+ "or, with the -q option:\n"
+ "QUOTA REM_QUOTA SPACE_QUOTA REM_SPACE_QUOTA\n"
+ " DIR_COUNT FILE_COUNT CONTENT_SIZE PATHNAME\n"
+ "The -h option shows file sizes in human readable format.\n"
+ "The -v option displays a header line.\n"
+ "The -t option displays quota by storage types.\n"
+ "It must be used with -q option.\n"
+ "If a comma-separated list of storage types is given after the -t option, \n"
+ "it displays the quota and usage for the specified types. \n"
+ "Otherwise, it displays the quota and usage for all the storage \n"
+ "types that support quota";
assertEquals("Count.getDescription", expected, actual);
}
  // mock ContentSummary whose toString() encodes which options were passed
static class MockContentSummary extends ContentSummary {
@SuppressWarnings("deprecation")
// suppress warning on the usage of deprecated ContentSummary constructor
public MockContentSummary() {
}
@Override
public String toString(boolean qOption, boolean hOption,
boolean tOption, List<StorageType> types) {
if (tOption) {
StringBuffer result = new StringBuffer();
result.append(hOption ? HUMAN : BYTES);
for (StorageType type : types) {
result.append(type.toString());
result.append(" ");
}
return result.toString();
}
if (qOption) {
if (hOption) {
return (HUMAN + WITH_QUOTAS);
} else {
return (BYTES + WITH_QUOTAS);
}
} else {
if (hOption) {
return (HUMAN + NO_QUOTAS);
} else {
return (BYTES + NO_QUOTAS);
}
}
}
}
// mock file system for use in testing
static class MockFileSystem extends FilterFileSystem {
Configuration conf;
MockFileSystem() {
super(mockFs);
}
@Override
public void initialize(URI uri, Configuration conf) {
this.conf = conf;
}
@Override
public Path makeQualified(Path path) {
return path;
}
@Override
public ContentSummary getContentSummary(Path f) throws IOException {
return new MockContentSummary();
}
@Override
public Configuration getConf() {
return conf;
}
}
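  // Wiring sketch (assumed usage, mirroring setup() above): registering
  // MockFileSystem under the "mockfs" scheme routes every "mockfs:/" path
  // through the static Mockito mock, so tests can stub per-path behavior:
  //
  //   conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
  //   when(mockFs.getFileStatus(eq(new Path("mockfs:/test"))))
  //       .thenReturn(fileStat);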
}
| 15,782 | 28.835539 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestMove.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell;
import static org.junit.Assert.*;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.*;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.PathExistsException;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestMove {
static Configuration conf;
static FileSystem mockFs;
@BeforeClass
public static void setup() throws IOException, URISyntaxException {
mockFs = mock(FileSystem.class);
conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
}
@Before
public void resetMock() throws IOException {
reset(mockFs);
}
@Test
public void testMoveTargetExistsWithoutExplicitRename() throws Exception {
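    // Moving mockfs:/file into the directory mockfs:/fold0 collides with the
    // existing mockfs:/fold0/file; the command is expected to fail with
    // PathExistsException even though overwrite is set.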
Path srcPath = new Path("mockfs:/file");
Path targetPath = new Path("mockfs:/fold0");
Path dupPath = new Path("mockfs:/fold0/file");
Path srcPath2 = new Path("mockfs://user/file");
Path targetPath2 = new Path("mockfs://user/fold0");
Path dupPath2 = new Path("mockfs://user/fold0/file");
InstrumentedRenameCommand cmd;
String[] cmdargs = new String[]{"mockfs:/file", "mockfs:/fold0"};
FileStatus src_fileStat, target_fileStat, dup_fileStat;
URI myuri;
src_fileStat = mock(FileStatus.class);
target_fileStat = mock(FileStatus.class);
dup_fileStat = mock(FileStatus.class);
myuri = new URI("mockfs://user");
when(src_fileStat.isDirectory()).thenReturn(false);
when(target_fileStat.isDirectory()).thenReturn(true);
when(dup_fileStat.isDirectory()).thenReturn(false);
when(src_fileStat.getPath()).thenReturn(srcPath2);
when(target_fileStat.getPath()).thenReturn(targetPath2);
when(dup_fileStat.getPath()).thenReturn(dupPath2);
when(mockFs.getFileStatus(eq(srcPath))).thenReturn(src_fileStat);
when(mockFs.getFileStatus(eq(targetPath))).thenReturn(target_fileStat);
when(mockFs.getFileStatus(eq(dupPath))).thenReturn(dup_fileStat);
when(mockFs.getFileStatus(eq(srcPath2))).thenReturn(src_fileStat);
when(mockFs.getFileStatus(eq(targetPath2))).thenReturn(target_fileStat);
when(mockFs.getFileStatus(eq(dupPath2))).thenReturn(dup_fileStat);
when(mockFs.getUri()).thenReturn(myuri);
cmd = new InstrumentedRenameCommand();
cmd.setConf(conf);
cmd.setOverwrite(true);
cmd.run(cmdargs);
// make sure command failed with the proper exception
assertTrue("Rename should have failed with path exists exception",
cmd.error instanceof PathExistsException);
}
static class MockFileSystem extends FilterFileSystem {
Configuration conf;
MockFileSystem() {
super(mockFs);
}
@Override
public void initialize(URI uri, Configuration conf) {
this.conf = conf;
}
@Override
public Path makeQualified(Path path) {
return path;
}
@Override
public Configuration getConf() {
return conf;
}
}
private static class InstrumentedRenameCommand extends MoveCommands.Rename {
public static String NAME = "InstrumentedRename";
private Exception error = null;
@Override
public void displayError(Exception e) {
error = e;
}
}
}
| 4,359 | 33.88 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathExceptions.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIOException;
import org.apache.hadoop.ipc.RemoteException;
import org.junit.Test;
public class TestPathExceptions {
protected String path = "some/file";
protected String error = "KABOOM";
@Test
public void testWithDefaultString() throws Exception {
PathIOException pe = new PathIOException(path);
assertEquals(new Path(path), pe.getPath());
assertEquals("`" + path + "': Input/output error", pe.getMessage());
}
@Test
public void testWithThrowable() throws Exception {
    IOException ioe = new IOException(error);
PathIOException pe = new PathIOException(path, ioe);
assertEquals(new Path(path), pe.getPath());
assertEquals("`" + path + "': Input/output error: " + error, pe.getMessage());
}
@Test
public void testWithCustomString() throws Exception {
PathIOException pe = new PathIOException(path, error);
assertEquals(new Path(path), pe.getPath());
assertEquals("`" + path + "': " + error, pe.getMessage());
}
@Test
public void testRemoteExceptionUnwrap() throws Exception {
PathIOException pe;
RemoteException re;
IOException ie;
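    // unwrapRemoteException reflectively rebuilds the wrapped exception from
    // its class name, so both the no-arg and the typed overload should yield
    // a PathIOException for each constructor variant exercised below.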
pe = new PathIOException(path);
re = new RemoteException(PathIOException.class.getName(), "test constructor1");
ie = re.unwrapRemoteException();
assertTrue(ie instanceof PathIOException);
ie = re.unwrapRemoteException(PathIOException.class);
assertTrue(ie instanceof PathIOException);
pe = new PathIOException(path, "constructor2");
re = new RemoteException(PathIOException.class.getName(), "test constructor2");
ie = re.unwrapRemoteException();
assertTrue(ie instanceof PathIOException);
ie = re.unwrapRemoteException(PathIOException.class);
assertTrue(ie instanceof PathIOException);
}
}
| 2,800 | 34.455696 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestXAttrCommands.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestXAttrCommands {
private final ByteArrayOutputStream errContent =
new ByteArrayOutputStream();
private Configuration conf = null;
private PrintStream initialStdErr;
@Before
public void setup() throws IOException {
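    // Redirect stderr so each test can assert on the exact validation
    // message FsShell prints for bad getfattr/setfattr usage.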
errContent.reset();
initialStdErr = System.err;
System.setErr(new PrintStream(errContent));
conf = new Configuration();
}
@After
public void cleanUp() throws Exception {
errContent.reset();
System.setErr(initialStdErr);
}
@Test
public void testGetfattrValidations() throws Exception {
errContent.reset();
assertFalse("getfattr should fail without path",
0 == runCommand(new String[] { "-getfattr", "-d"}));
assertTrue(errContent.toString().contains("<path> is missing"));
errContent.reset();
assertFalse("getfattr should fail with extra argument",
0 == runCommand(new String[] { "-getfattr", "extra", "-d", "/test"}));
assertTrue(errContent.toString().contains("Too many arguments"));
errContent.reset();
assertFalse("getfattr should fail without \"-n name\" or \"-d\"",
0 == runCommand(new String[] { "-getfattr", "/test"}));
assertTrue(errContent.toString().contains("Must specify '-n name' or '-d' option"));
errContent.reset();
assertFalse("getfattr should fail with invalid encoding",
0 == runCommand(new String[] { "-getfattr", "-d", "-e", "aaa", "/test"}));
assertTrue(errContent.toString().contains("Invalid/unsupported encoding option specified: aaa"));
}
@Test
public void testSetfattrValidations() throws Exception {
errContent.reset();
assertFalse("setfattr should fail without path",
0 == runCommand(new String[] { "-setfattr", "-n", "user.a1" }));
assertTrue(errContent.toString().contains("<path> is missing"));
errContent.reset();
assertFalse("setfattr should fail with extra arguments",
0 == runCommand(new String[] { "-setfattr", "extra", "-n", "user.a1", "/test"}));
assertTrue(errContent.toString().contains("Too many arguments"));
errContent.reset();
assertFalse("setfattr should fail without \"-n name\" or \"-x name\"",
0 == runCommand(new String[] { "-setfattr", "/test"}));
assertTrue(errContent.toString().contains("Must specify '-n name' or '-x name' option"));
}
private int runCommand(String[] commands) throws Exception {
return ToolRunner.run(conf, new FsShell(), commands);
}
}
| 3,701 | 36.393939 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCommandFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell;
import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
import org.junit.Before;
import org.junit.Test;
public class TestCommandFactory {
static CommandFactory factory;
static Configuration conf = new Configuration();
static void registerCommands(CommandFactory factory) {
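    // intentionally empty: these tests register commands via TestRegistrar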
}
@Before
public void testSetup() {
factory = new CommandFactory(conf);
assertNotNull(factory);
}
@Test
public void testRegistration() {
assertArrayEquals(new String []{}, factory.getNames());
factory.registerCommands(TestRegistrar.class);
String [] names = factory.getNames();
assertArrayEquals(new String []{"tc1", "tc2", "tc2.1"}, names);
factory.addClass(TestCommand3.class, "tc3");
names = factory.getNames();
assertArrayEquals(new String []{"tc1", "tc2", "tc2.1", "tc3"}, names);
}
@Test
public void testGetInstances() {
factory.registerCommands(TestRegistrar.class);
Command instance;
instance = factory.getInstance("blarg");
assertNull(instance);
instance = factory.getInstance("tc1");
assertNotNull(instance);
assertEquals(TestCommand1.class, instance.getClass());
assertEquals("tc1", instance.getCommandName());
instance = factory.getInstance("tc2");
assertNotNull(instance);
assertEquals(TestCommand2.class, instance.getClass());
assertEquals("tc2", instance.getCommandName());
instance = factory.getInstance("tc2.1");
assertNotNull(instance);
assertEquals(TestCommand2.class, instance.getClass());
assertEquals("tc2.1", instance.getCommandName());
}
static class TestRegistrar {
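    // CommandFactory#registerCommands(Class) reflectively invokes this
    // static hook, which is how FsShell command classes self-register;
    // note tc2/tc2.1 register one class under two names.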
public static void registerCommands(CommandFactory factory) {
factory.addClass(TestCommand1.class, "tc1");
factory.addClass(TestCommand2.class, "tc2", "tc2.1");
}
}
static class TestCommand1 extends FsCommand {}
static class TestCommand2 extends FsCommand {}
static class TestCommand3 extends FsCommand {}
}
| 2,837 | 31.62069 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestAclCommands.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell;
import static org.junit.Assert.*;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RpcNoSuchMethodException;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Before;
import org.junit.Test;
public class TestAclCommands {
private Configuration conf = null;
@Before
public void setup() throws IOException {
conf = new Configuration();
}
@Test
public void testGetfaclValidations() throws Exception {
assertFalse("getfacl should fail without path",
0 == runCommand(new String[] { "-getfacl" }));
assertFalse("getfacl should fail with extra argument",
0 == runCommand(new String[] { "-getfacl", "/test", "extraArg" }));
}
@Test
public void testSetfaclValidations() throws Exception {
assertFalse("setfacl should fail without path",
0 == runCommand(new String[] { "-setfacl" }));
assertFalse("setfacl should fail without aclSpec",
0 == runCommand(new String[] { "-setfacl", "-m", "/path" }));
assertFalse("setfacl should fail with conflicting options",
0 == runCommand(new String[] { "-setfacl", "-m", "/path" }));
assertFalse("setfacl should fail with extra arguments",
0 == runCommand(new String[] { "-setfacl", "/path", "extra" }));
assertFalse("setfacl should fail with extra arguments",
0 == runCommand(new String[] { "-setfacl", "--set",
"default:user::rwx", "/path", "extra" }));
assertFalse("setfacl should fail with permissions for -x",
0 == runCommand(new String[] { "-setfacl", "-x", "user:user1:rwx",
"/path" }));
assertFalse("setfacl should fail ACL spec missing",
0 == runCommand(new String[] { "-setfacl", "-m",
"", "/path" }));
}
@Test
public void testSetfaclValidationsWithoutPermissions() throws Exception {
List<AclEntry> parsedList = new ArrayList<AclEntry>();
try {
parsedList = AclEntry.parseAclSpec("user:user1:", true);
} catch (IllegalArgumentException e) {
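      // expected: parseAclSpec rejects an entry with no permissions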
}
    assertEquals(0, parsedList.size());
assertFalse("setfacl should fail with less arguments",
0 == runCommand(new String[] { "-setfacl", "-m", "user:user1:",
"/path" }));
}
@Test
public void testMultipleAclSpecParsing() throws Exception {
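    // An ACL spec is a comma-separated list of entries of the form
    // [default:]type:[name]:perms, e.g. "user:user1:rwx" or
    // "default:group:group1:rw-".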
List<AclEntry> parsedList = AclEntry.parseAclSpec(
"group::rwx,user:user1:rwx,user:user2:rw-,"
+ "group:group1:rw-,default:group:group1:rw-", true);
AclEntry basicAcl = new AclEntry.Builder().setType(AclEntryType.GROUP)
.setPermission(FsAction.ALL).build();
AclEntry user1Acl = new AclEntry.Builder().setType(AclEntryType.USER)
.setPermission(FsAction.ALL).setName("user1").build();
AclEntry user2Acl = new AclEntry.Builder().setType(AclEntryType.USER)
.setPermission(FsAction.READ_WRITE).setName("user2").build();
AclEntry group1Acl = new AclEntry.Builder().setType(AclEntryType.GROUP)
.setPermission(FsAction.READ_WRITE).setName("group1").build();
AclEntry defaultAcl = new AclEntry.Builder().setType(AclEntryType.GROUP)
.setPermission(FsAction.READ_WRITE).setName("group1")
.setScope(AclEntryScope.DEFAULT).build();
List<AclEntry> expectedList = new ArrayList<AclEntry>();
expectedList.add(basicAcl);
expectedList.add(user1Acl);
expectedList.add(user2Acl);
expectedList.add(group1Acl);
expectedList.add(defaultAcl);
assertEquals("Parsed Acl not correct", expectedList, parsedList);
}
@Test
public void testMultipleAclSpecParsingWithoutPermissions() throws Exception {
List<AclEntry> parsedList = AclEntry.parseAclSpec(
"user::,user:user1:,group::,group:group1:,mask::,other::,"
+ "default:user:user1::,default:mask::", false);
AclEntry owner = new AclEntry.Builder().setType(AclEntryType.USER).build();
AclEntry namedUser = new AclEntry.Builder().setType(AclEntryType.USER)
.setName("user1").build();
AclEntry group = new AclEntry.Builder().setType(AclEntryType.GROUP).build();
AclEntry namedGroup = new AclEntry.Builder().setType(AclEntryType.GROUP)
.setName("group1").build();
AclEntry mask = new AclEntry.Builder().setType(AclEntryType.MASK).build();
AclEntry other = new AclEntry.Builder().setType(AclEntryType.OTHER).build();
AclEntry defaultUser = new AclEntry.Builder()
.setScope(AclEntryScope.DEFAULT).setType(AclEntryType.USER)
.setName("user1").build();
AclEntry defaultMask = new AclEntry.Builder()
.setScope(AclEntryScope.DEFAULT).setType(AclEntryType.MASK).build();
List<AclEntry> expectedList = new ArrayList<AclEntry>();
expectedList.add(owner);
expectedList.add(namedUser);
expectedList.add(group);
expectedList.add(namedGroup);
expectedList.add(mask);
expectedList.add(other);
expectedList.add(defaultUser);
expectedList.add(defaultMask);
assertEquals("Parsed Acl not correct", expectedList, parsedList);
}
@Test
public void testLsNoRpcForGetAclStatus() throws Exception {
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///");
conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);
conf.setBoolean("stubfs.noRpcForGetAclStatus", true);
assertEquals("ls must succeed even if getAclStatus RPC does not exist.",
0, ToolRunner.run(conf, new FsShell(), new String[] { "-ls", "/" }));
}
@Test
public void testLsAclsUnsupported() throws Exception {
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///");
conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);
assertEquals("ls must succeed even if FileSystem does not implement ACLs.",
0, ToolRunner.run(conf, new FsShell(), new String[] { "-ls", "/" }));
}
public static class StubFileSystem extends FileSystem {
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
return null;
}
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return null;
}
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
return false;
}
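    // Simulates an older server: when "stubfs.noRpcForGetAclStatus" is set,
    // getAclStatus() fails the way a missing RPC method would, letting the
    // ls tests verify graceful degradation.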
public AclStatus getAclStatus(Path path) throws IOException {
if (getConf().getBoolean("stubfs.noRpcForGetAclStatus", false)) {
throw new RemoteException(RpcNoSuchMethodException.class.getName(),
"test exception");
}
return super.getAclStatus(path);
}
@Override
public FileStatus getFileStatus(Path f) throws IOException {
if (f.isRoot()) {
return new FileStatus(0, true, 0, 0, 0, f);
}
return null;
}
@Override
public URI getUri() {
return URI.create("stubfs:///");
}
@Override
public Path getWorkingDirectory() {
return null;
}
@Override
public FileStatus[] listStatus(Path f) throws IOException {
FsPermission perm = new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE,
FsAction.READ_EXECUTE);
Path path = new Path("/foo");
FileStatus stat = new FileStatus(1000, true, 3, 1000, 0, 0, perm, "owner",
"group", path);
return new FileStatus[] { stat };
}
@Override
public boolean mkdirs(Path f, FsPermission permission)
throws IOException {
return false;
}
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
return null;
}
@Override
public boolean rename(Path src, Path dst) throws IOException {
return false;
}
@Override
public void setWorkingDirectory(Path dir) {
}
}
private int runCommand(String[] commands) throws Exception {
return ToolRunner.run(conf, new FsShell(), commands);
}
}
| 9,651 | 37 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.shell.CopyCommands.Cp;
import org.apache.hadoop.fs.shell.CopyCommands.Get;
import org.apache.hadoop.fs.shell.CopyCommands.Put;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestCopyPreserveFlag {
private static final int MODIFICATION_TIME = 12345000;
private static final int ACCESS_TIME = 23456000;
private static final Path DIR_FROM = new Path("d0");
private static final Path DIR_TO1 = new Path("d1");
private static final Path DIR_TO2 = new Path("d2");
private static final Path FROM = new Path(DIR_FROM, "f0");
private static final Path TO = new Path(DIR_TO1, "f1");
private static final FsPermission PERMISSIONS = new FsPermission(
FsAction.ALL,
FsAction.EXECUTE,
FsAction.READ_WRITE);
private FileSystem fs;
private Path testDir;
private Configuration conf;
@Before
public void initialize() throws Exception {
conf = new Configuration(false);
conf.set("fs.file.impl", LocalFileSystem.class.getName());
fs = FileSystem.getLocal(conf);
testDir = new Path(
System.getProperty("test.build.data", "build/test/data") + "/testStat"
);
// don't want scheme on the path, just an absolute path
testDir = new Path(fs.makeQualified(testDir).toUri().getPath());
FileSystem.setDefaultUri(conf, fs.getUri());
fs.setWorkingDirectory(testDir);
fs.mkdirs(DIR_FROM);
fs.mkdirs(DIR_TO1);
fs.createNewFile(FROM);
FSDataOutputStream output = fs.create(FROM, true);
for(int i = 0; i < 100; ++i) {
output.writeInt(i);
output.writeChar('\n');
}
output.close();
fs.setTimes(FROM, MODIFICATION_TIME, ACCESS_TIME);
fs.setPermission(FROM, PERMISSIONS);
fs.setTimes(DIR_FROM, MODIFICATION_TIME, ACCESS_TIME);
fs.setPermission(DIR_FROM, PERMISSIONS);
}
@After
public void cleanup() throws Exception {
fs.delete(testDir, true);
fs.close();
}
private void assertAttributesPreserved(Path to) throws IOException {
FileStatus status = fs.getFileStatus(to);
assertEquals(MODIFICATION_TIME, status.getModificationTime());
assertEquals(ACCESS_TIME, status.getAccessTime());
assertEquals(PERMISSIONS, status.getPermission());
}
private void assertAttributesChanged(Path to) throws IOException {
FileStatus status = fs.getFileStatus(to);
assertNotEquals(MODIFICATION_TIME, status.getModificationTime());
assertNotEquals(ACCESS_TIME, status.getAccessTime());
assertNotEquals(PERMISSIONS, status.getPermission());
}
private void run(CommandWithDestination cmd, String... args) {
cmd.setConf(conf);
assertEquals(0, cmd.run(args));
}
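  // For reference, the shell equivalents exercised below are, as a sketch
  // assuming default FsShell option parsing:
  //   hadoop fs -put -p d0/f0 d1/f1   # preserves mtime, atime and permissions
  //   hadoop fs -get -p d0/f0 d1/f1
  //   hadoop fs -cp  -p d0/f0 d1/f1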
@Test(timeout = 10000)
public void testPutWithP() throws Exception {
run(new Put(), "-p", FROM.toString(), TO.toString());
assertAttributesPreserved(TO);
}
@Test(timeout = 10000)
public void testPutWithoutP() throws Exception {
run(new Put(), FROM.toString(), TO.toString());
assertAttributesChanged(TO);
}
@Test(timeout = 10000)
public void testGetWithP() throws Exception {
run(new Get(), "-p", FROM.toString(), TO.toString());
assertAttributesPreserved(TO);
}
@Test(timeout = 10000)
public void testGetWithoutP() throws Exception {
run(new Get(), FROM.toString(), TO.toString());
assertAttributesChanged(TO);
}
@Test(timeout = 10000)
public void testCpWithP() throws Exception {
run(new Cp(), "-p", FROM.toString(), TO.toString());
assertAttributesPreserved(TO);
}
@Test(timeout = 10000)
public void testCpWithoutP() throws Exception {
run(new Cp(), FROM.toString(), TO.toString());
assertAttributesChanged(TO);
}
@Test(timeout = 10000)
public void testDirectoryCpWithP() throws Exception {
run(new Cp(), "-p", DIR_FROM.toString(), DIR_TO2.toString());
assertAttributesPreserved(DIR_TO2);
}
@Test(timeout = 10000)
public void testDirectoryCpWithoutP() throws Exception {
run(new Cp(), DIR_FROM.toString(), DIR_TO2.toString());
assertAttributesChanged(DIR_TO2);
}
}
| 5,399 | 32.962264 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestLs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY;
import static org.junit.Assert.*;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.*;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.net.URI;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.LinkedList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.InOrder;
/**
* JUnit test class for {@link org.apache.hadoop.fs.shell.Ls}
*
*/
public class TestLs {
private static Configuration conf;
private static FileSystem mockFs;
private static final Date NOW = new Date();
@BeforeClass
public static void setup() throws IOException {
conf = new Configuration();
conf.set("fs.defaultFS", "mockfs:///");
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
mockFs = mock(FileSystem.class);
}
@Before
public void resetMock() throws IOException {
reset(mockFs);
AclStatus mockAclStatus = mock(AclStatus.class);
when(mockAclStatus.getEntries()).thenReturn(new ArrayList<AclEntry>());
when(mockFs.getAclStatus(any(Path.class))).thenReturn(mockAclStatus);
}
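  // A minimal sketch of how the wiring above resolves: with fs.defaultFS set
  // to "mockfs:///", FileSystem.get(conf) instantiates MockFileSystem via the
  // fs.mockfs.impl key, and MockFileSystem delegates every call to the
  // Mockito mock, so each test can stub getFileStatus/listStatus per path.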
// check that default options are correct
@Test
public void processOptionsNone() throws IOException {
LinkedList<String> options = new LinkedList<String>();
Ls ls = new Ls();
ls.processOptions(options);
assertFalse(ls.isPathOnly());
assertTrue(ls.isDirRecurse());
assertFalse(ls.isHumanReadable());
assertFalse(ls.isRecursive());
assertFalse(ls.isOrderReverse());
assertFalse(ls.isOrderSize());
assertFalse(ls.isOrderTime());
assertFalse(ls.isUseAtime());
}
// check the -C option is recognised
@Test
public void processOptionsPathOnly() throws IOException {
LinkedList<String> options = new LinkedList<String>();
options.add("-C");
Ls ls = new Ls();
ls.processOptions(options);
assertTrue(ls.isPathOnly());
assertTrue(ls.isDirRecurse());
assertFalse(ls.isHumanReadable());
assertFalse(ls.isRecursive());
assertFalse(ls.isOrderReverse());
assertFalse(ls.isOrderSize());
assertFalse(ls.isOrderTime());
assertFalse(ls.isUseAtime());
}
// check the -d option is recognised
@Test
public void processOptionsDirectory() throws IOException {
LinkedList<String> options = new LinkedList<String>();
options.add("-d");
Ls ls = new Ls();
ls.processOptions(options);
assertFalse(ls.isPathOnly());
assertFalse(ls.isDirRecurse());
assertFalse(ls.isHumanReadable());
assertFalse(ls.isRecursive());
assertFalse(ls.isOrderReverse());
assertFalse(ls.isOrderSize());
assertFalse(ls.isOrderTime());
assertFalse(ls.isUseAtime());
}
// check the -h option is recognised
@Test
public void processOptionsHuman() throws IOException {
LinkedList<String> options = new LinkedList<String>();
options.add("-h");
Ls ls = new Ls();
ls.processOptions(options);
assertFalse(ls.isPathOnly());
assertTrue(ls.isDirRecurse());
assertTrue(ls.isHumanReadable());
assertFalse(ls.isRecursive());
assertFalse(ls.isOrderReverse());
assertFalse(ls.isOrderSize());
assertFalse(ls.isOrderTime());
assertFalse(ls.isUseAtime());
}
// check the -R option is recognised
@Test
public void processOptionsRecursive() throws IOException {
LinkedList<String> options = new LinkedList<String>();
options.add("-R");
Ls ls = new Ls();
ls.processOptions(options);
assertFalse(ls.isPathOnly());
assertTrue(ls.isDirRecurse());
assertFalse(ls.isHumanReadable());
assertTrue(ls.isRecursive());
assertFalse(ls.isOrderReverse());
assertFalse(ls.isOrderSize());
assertFalse(ls.isOrderTime());
assertFalse(ls.isUseAtime());
}
// check the -r option is recognised
@Test
public void processOptionsReverse() throws IOException {
LinkedList<String> options = new LinkedList<String>();
options.add("-r");
Ls ls = new Ls();
ls.processOptions(options);
assertFalse(ls.isPathOnly());
assertTrue(ls.isDirRecurse());
assertFalse(ls.isHumanReadable());
assertFalse(ls.isRecursive());
assertTrue(ls.isOrderReverse());
assertFalse(ls.isOrderSize());
assertFalse(ls.isOrderTime());
assertFalse(ls.isUseAtime());
}
// check the -S option is recognised
@Test
public void processOptionsSize() throws IOException {
LinkedList<String> options = new LinkedList<String>();
options.add("-S");
Ls ls = new Ls();
ls.processOptions(options);
assertFalse(ls.isPathOnly());
assertTrue(ls.isDirRecurse());
assertFalse(ls.isHumanReadable());
assertFalse(ls.isRecursive());
assertFalse(ls.isOrderReverse());
assertTrue(ls.isOrderSize());
assertFalse(ls.isOrderTime());
assertFalse(ls.isUseAtime());
}
// check the -t option is recognised
@Test
public void processOptionsMtime() throws IOException {
LinkedList<String> options = new LinkedList<String>();
options.add("-t");
Ls ls = new Ls();
ls.processOptions(options);
assertFalse(ls.isPathOnly());
assertTrue(ls.isDirRecurse());
assertFalse(ls.isHumanReadable());
assertFalse(ls.isRecursive());
assertFalse(ls.isOrderReverse());
assertFalse(ls.isOrderSize());
assertTrue(ls.isOrderTime());
assertFalse(ls.isUseAtime());
}
// check the precedence of the -t and -S options
@Test
public void processOptionsMtimeSize() throws IOException {
LinkedList<String> options = new LinkedList<String>();
options.add("-t");
options.add("-S");
Ls ls = new Ls();
ls.processOptions(options);
assertFalse(ls.isPathOnly());
assertTrue(ls.isDirRecurse());
assertFalse(ls.isHumanReadable());
assertFalse(ls.isRecursive());
assertFalse(ls.isOrderReverse());
assertFalse(ls.isOrderSize());
assertTrue(ls.isOrderTime());
assertFalse(ls.isUseAtime());
}
// check the precedence of the -t, -S and -r options
@Test
public void processOptionsMtimeSizeReverse() throws IOException {
LinkedList<String> options = new LinkedList<String>();
options.add("-t");
options.add("-S");
options.add("-r");
Ls ls = new Ls();
ls.processOptions(options);
assertFalse(ls.isPathOnly());
assertTrue(ls.isDirRecurse());
assertFalse(ls.isHumanReadable());
assertFalse(ls.isRecursive());
assertTrue(ls.isOrderReverse());
assertFalse(ls.isOrderSize());
assertTrue(ls.isOrderTime());
assertFalse(ls.isUseAtime());
}
  // check the -u option is recognised
@Test
public void processOptionsAtime() throws IOException {
LinkedList<String> options = new LinkedList<String>();
options.add("-u");
Ls ls = new Ls();
ls.processOptions(options);
assertFalse(ls.isPathOnly());
assertTrue(ls.isDirRecurse());
assertFalse(ls.isHumanReadable());
assertFalse(ls.isRecursive());
assertFalse(ls.isOrderReverse());
assertFalse(ls.isOrderSize());
assertFalse(ls.isOrderTime());
assertTrue(ls.isUseAtime());
}
  // check all options are handled correctly
@Test
public void processOptionsAll() throws IOException {
LinkedList<String> options = new LinkedList<String>();
options.add("-C"); // show file path only
options.add("-d"); // directory
options.add("-h"); // human readable
options.add("-R"); // recursive
options.add("-r"); // reverse order
options.add("-t"); // time order
options.add("-S"); // size order
options.add("-u"); // show atime
Ls ls = new Ls();
ls.processOptions(options);
assertTrue(ls.isPathOnly());
assertFalse(ls.isDirRecurse());
assertTrue(ls.isHumanReadable());
assertFalse(ls.isRecursive()); // -d overrules -R
assertTrue(ls.isOrderReverse());
assertFalse(ls.isOrderSize()); // -t overrules -S
assertTrue(ls.isOrderTime());
assertTrue(ls.isUseAtime());
}
// check listing of a single file
@Test
public void processPathFile() throws IOException {
TestFile testfile = new TestFile("testDir", "testFile");
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testfile.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println(testfile.formatLineMtime(lineFormat));
verifyNoMoreInteractions(out);
}
// check listing of multiple files
@Test
public void processPathFiles() throws IOException {
TestFile testfile01 = new TestFile("testDir01", "testFile01");
TestFile testfile02 = new TestFile("testDir02", "testFile02");
TestFile testfile03 = new TestFile("testDir03", "testFile03");
TestFile testfile04 = new TestFile("testDir04", "testFile04");
TestFile testfile05 = new TestFile("testDir05", "testFile05");
TestFile testfile06 = new TestFile("testDir06", "testFile06");
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testfile01.getPathData());
pathData.add(testfile02.getPathData());
pathData.add(testfile03.getPathData());
pathData.add(testfile04.getPathData());
pathData.add(testfile05.getPathData());
pathData.add(testfile06.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println(testfile01.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile02.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile04.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile06.formatLineMtime(lineFormat));
verifyNoMoreInteractions(out);
}
// check listing of a single directory
@Test
public void processPathDirectory() throws IOException {
TestFile testfile01 = new TestFile("testDirectory", "testFile01");
TestFile testfile02 = new TestFile("testDirectory", "testFile02");
TestFile testfile03 = new TestFile("testDirectory", "testFile03");
TestFile testfile04 = new TestFile("testDirectory", "testFile04");
TestFile testfile05 = new TestFile("testDirectory", "testFile05");
TestFile testfile06 = new TestFile("testDirectory", "testFile06");
TestFile testDir = new TestFile("", "testDirectory");
testDir.setIsDir(true);
testDir.addContents(testfile01, testfile02, testfile03, testfile04,
testfile05, testfile06);
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testDir.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println("Found 6 items");
inOrder.verify(out).println(testfile01.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile02.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile04.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile06.formatLineMtime(lineFormat));
verifyNoMoreInteractions(out);
}
// check listing of multiple directories
@Test
public void processPathDirectories() throws IOException {
TestFile testfile01 = new TestFile("testDirectory01", "testFile01");
TestFile testfile02 = new TestFile("testDirectory01", "testFile02");
TestFile testfile03 = new TestFile("testDirectory01", "testFile03");
TestFile testDir01 = new TestFile("", "testDirectory01");
testDir01.setIsDir(true);
testDir01.addContents(testfile01, testfile02, testfile03);
TestFile testfile04 = new TestFile("testDirectory02", "testFile04");
TestFile testfile05 = new TestFile("testDirectory02", "testFile05");
TestFile testfile06 = new TestFile("testDirectory02", "testFile06");
TestFile testDir02 = new TestFile("", "testDirectory02");
testDir02.setIsDir(true);
testDir02.addContents(testfile04, testfile05, testfile06);
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testDir01.getPathData());
pathData.add(testDir02.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println("Found 3 items");
inOrder.verify(out).println(testfile01.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile02.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineMtime(lineFormat));
inOrder.verify(out).println("Found 3 items");
inOrder.verify(out).println(testfile04.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile06.formatLineMtime(lineFormat));
verifyNoMoreInteractions(out);
}
// check the default ordering
@Test
public void processPathDirOrderDefault() throws IOException {
TestFile testfile01 = new TestFile("testDirectory", "testFile01");
TestFile testfile02 = new TestFile("testDirectory", "testFile02");
TestFile testfile03 = new TestFile("testDirectory", "testFile03");
TestFile testfile04 = new TestFile("testDirectory", "testFile04");
TestFile testfile05 = new TestFile("testDirectory", "testFile05");
TestFile testfile06 = new TestFile("testDirectory", "testFile06");
TestFile testDir = new TestFile("", "testDirectory");
testDir.setIsDir(true);
    // add contents in non-lexicographic order to show they get sorted
testDir.addContents(testfile01, testfile03, testfile05, testfile02,
testfile04, testfile06);
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testDir.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println("Found 6 items");
inOrder.verify(out).println(testfile01.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile02.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile04.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile06.formatLineMtime(lineFormat));
verifyNoMoreInteractions(out);
}
// check reverse default ordering
@Test
public void processPathDirOrderDefaultReverse() throws IOException {
TestFile testfile01 = new TestFile("testDirectory", "testFile01");
TestFile testfile02 = new TestFile("testDirectory", "testFile02");
TestFile testfile03 = new TestFile("testDirectory", "testFile03");
TestFile testfile04 = new TestFile("testDirectory", "testFile04");
TestFile testfile05 = new TestFile("testDirectory", "testFile05");
TestFile testfile06 = new TestFile("testDirectory", "testFile06");
TestFile testDir = new TestFile("", "testDirectory");
testDir.setIsDir(true);
    // add contents in non-lexicographic order to show they get sorted
testDir.addContents(testfile01, testfile03, testfile05, testfile02,
testfile04, testfile06);
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testDir.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-r");
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println("Found 6 items");
inOrder.verify(out).println(testfile06.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile04.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile02.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile01.formatLineMtime(lineFormat));
verifyNoMoreInteractions(out);
}
  // check mtime ordering (-t option); most recent first, in line with Unix
  // convention
@Test
public void processPathDirOrderMtime() throws IOException {
TestFile testfile01 = new TestFile("testDirectory", "testFile01");
TestFile testfile02 = new TestFile("testDirectory", "testFile02");
TestFile testfile03 = new TestFile("testDirectory", "testFile03");
TestFile testfile04 = new TestFile("testDirectory", "testFile04");
TestFile testfile05 = new TestFile("testDirectory", "testFile05");
TestFile testfile06 = new TestFile("testDirectory", "testFile06");
// set file mtime in different order to file names
testfile01.setMtime(NOW.getTime() + 10);
testfile02.setMtime(NOW.getTime() + 30);
testfile03.setMtime(NOW.getTime() + 20);
testfile04.setMtime(NOW.getTime() + 60);
testfile05.setMtime(NOW.getTime() + 50);
testfile06.setMtime(NOW.getTime() + 40);
TestFile testDir = new TestFile("", "testDirectory");
testDir.setIsDir(true);
testDir.addContents(testfile01, testfile02, testfile03, testfile04,
testfile05, testfile06);
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testDir.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-t");
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println("Found 6 items");
inOrder.verify(out).println(testfile04.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile06.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile02.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile01.formatLineMtime(lineFormat));
verifyNoMoreInteractions(out);
}
// check reverse mtime ordering (-t -r options)
@Test
public void processPathDirOrderMtimeReverse() throws IOException {
TestFile testfile01 = new TestFile("testDirectory", "testFile01");
TestFile testfile02 = new TestFile("testDirectory", "testFile02");
TestFile testfile03 = new TestFile("testDirectory", "testFile03");
TestFile testfile04 = new TestFile("testDirectory", "testFile04");
TestFile testfile05 = new TestFile("testDirectory", "testFile05");
TestFile testfile06 = new TestFile("testDirectory", "testFile06");
// set file mtime in different order to file names
testfile01.setMtime(NOW.getTime() + 10);
testfile02.setMtime(NOW.getTime() + 30);
testfile03.setMtime(NOW.getTime() + 20);
testfile04.setMtime(NOW.getTime() + 60);
testfile05.setMtime(NOW.getTime() + 50);
testfile06.setMtime(NOW.getTime() + 40);
TestFile testDir = new TestFile("", "testDirectory");
testDir.setIsDir(true);
testDir.addContents(testfile01, testfile02, testfile03, testfile04,
testfile05, testfile06);
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testDir.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-t");
options.add("-r");
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println("Found 6 items");
inOrder.verify(out).println(testfile01.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile02.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile06.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile04.formatLineMtime(lineFormat));
verifyNoMoreInteractions(out);
}
  // check that multiple directories are ordered independently
@Test
public void processPathDirsOrderMtime() throws IOException {
TestFile testfile01 = new TestFile("testDirectory01", "testFile01");
TestFile testfile02 = new TestFile("testDirectory01", "testFile02");
TestFile testfile03 = new TestFile("testDirectory01", "testFile03");
TestFile testfile04 = new TestFile("testDirectory02", "testFile04");
TestFile testfile05 = new TestFile("testDirectory02", "testFile05");
TestFile testfile06 = new TestFile("testDirectory02", "testFile06");
// set file mtime in different order to file names
testfile01.setMtime(NOW.getTime() + 10);
testfile02.setMtime(NOW.getTime() + 30);
testfile03.setMtime(NOW.getTime() + 20);
testfile04.setMtime(NOW.getTime() + 60);
testfile05.setMtime(NOW.getTime() + 40);
testfile06.setMtime(NOW.getTime() + 50);
TestFile testDir01 = new TestFile("", "testDirectory01");
testDir01.setIsDir(true);
testDir01.addContents(testfile01, testfile02, testfile03);
TestFile testDir02 = new TestFile("", "testDirectory02");
testDir02.setIsDir(true);
testDir02.addContents(testfile04, testfile05, testfile06);
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testDir01.getPathData());
pathData.add(testDir02.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-t");
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println("Found 3 items");
inOrder.verify(out).println(testfile02.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile01.formatLineMtime(lineFormat));
inOrder.verify(out).println("Found 3 items");
inOrder.verify(out).println(testfile04.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile06.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineMtime(lineFormat));
verifyNoMoreInteractions(out);
}
// check mtime ordering with large time gaps between files (checks integer
// overflow issues)
@Test
public void processPathDirOrderMtimeYears() throws IOException {
TestFile testfile01 = new TestFile("testDirectory", "testFile01");
TestFile testfile02 = new TestFile("testDirectory", "testFile02");
TestFile testfile03 = new TestFile("testDirectory", "testFile03");
TestFile testfile04 = new TestFile("testDirectory", "testFile04");
TestFile testfile05 = new TestFile("testDirectory", "testFile05");
TestFile testfile06 = new TestFile("testDirectory", "testFile06");
// set file mtime in different order to file names
testfile01.setMtime(NOW.getTime() + Integer.MAX_VALUE);
testfile02.setMtime(NOW.getTime() + Integer.MIN_VALUE);
testfile03.setMtime(NOW.getTime() + 0);
testfile04.setMtime(NOW.getTime() + Integer.MAX_VALUE + Integer.MAX_VALUE);
testfile05.setMtime(NOW.getTime() + 0);
testfile06.setMtime(NOW.getTime() + Integer.MIN_VALUE + Integer.MIN_VALUE);
TestFile testDir = new TestFile("", "testDirectory");
testDir.setIsDir(true);
testDir.addContents(testfile01, testfile02, testfile03, testfile04,
testfile05, testfile06);
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testDir.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-t");
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println("Found 6 items");
inOrder.verify(out).println(testfile04.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile01.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile02.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile06.formatLineMtime(lineFormat));
verifyNoMoreInteractions(out);
}
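  // Why the large-gap cases above matter: a comparator written as
  // (int) (left.getModificationTime() - right.getModificationTime())
  // overflows once the gap exceeds Integer.MAX_VALUE and can invert the
  // sort. A minimal sketch of the overflow-safe form (the helper name is
  // illustrative, not part of Ls):
  @SuppressWarnings("unused")
  private static int compareMtimeDescending(FileStatus left, FileStatus right) {
    // Long.compare never overflows, unlike casting the difference to int.
    return Long.compare(right.getModificationTime(),
        left.getModificationTime()); // most recent first, matching ls -t
  }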
// check length order (-S option)
@Test
public void processPathDirOrderLength() throws IOException {
TestFile testfile01 = new TestFile("testDirectory", "testFile01");
TestFile testfile02 = new TestFile("testDirectory", "testFile02");
TestFile testfile03 = new TestFile("testDirectory", "testFile03");
TestFile testfile04 = new TestFile("testDirectory", "testFile04");
TestFile testfile05 = new TestFile("testDirectory", "testFile05");
TestFile testfile06 = new TestFile("testDirectory", "testFile06");
// set file length in different order to file names
long length = 1234567890;
testfile01.setLength(length + 10);
testfile02.setLength(length + 30);
testfile03.setLength(length + 20);
testfile04.setLength(length + 60);
testfile05.setLength(length + 50);
testfile06.setLength(length + 40);
TestFile testDir = new TestFile("", "testDirectory");
testDir.setIsDir(true);
testDir.addContents(testfile01, testfile02, testfile03, testfile04,
testfile05, testfile06);
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testDir.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-S");
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println("Found 6 items");
inOrder.verify(out).println(testfile04.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile06.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile02.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile01.formatLineMtime(lineFormat));
verifyNoMoreInteractions(out);
}
// check reverse length order (-S -r options)
@Test
public void processPathDirOrderLengthReverse() throws IOException {
TestFile testfile01 = new TestFile("testDirectory", "testFile01");
TestFile testfile02 = new TestFile("testDirectory", "testFile02");
TestFile testfile03 = new TestFile("testDirectory", "testFile03");
TestFile testfile04 = new TestFile("testDirectory", "testFile04");
TestFile testfile05 = new TestFile("testDirectory", "testFile05");
TestFile testfile06 = new TestFile("testDirectory", "testFile06");
// set file length in different order to file names
long length = 1234567890;
testfile01.setLength(length + 10);
testfile02.setLength(length + 30);
testfile03.setLength(length + 20);
testfile04.setLength(length + 60);
testfile05.setLength(length + 50);
testfile06.setLength(length + 40);
TestFile testDir = new TestFile("", "testDirectory");
testDir.setIsDir(true);
testDir.addContents(testfile01, testfile02, testfile03, testfile04,
testfile05, testfile06);
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testDir.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-S");
options.add("-r");
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println("Found 6 items");
inOrder.verify(out).println(testfile01.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile02.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile06.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile04.formatLineMtime(lineFormat));
verifyNoMoreInteractions(out);
}
// check length ordering with large size gaps between files (checks integer
// overflow issues)
@Test
public void processPathDirOrderLengthLarge() throws IOException {
TestFile testfile01 = new TestFile("testDirectory", "testFile01");
TestFile testfile02 = new TestFile("testDirectory", "testFile02");
TestFile testfile03 = new TestFile("testDirectory", "testFile03");
TestFile testfile04 = new TestFile("testDirectory", "testFile04");
TestFile testfile05 = new TestFile("testDirectory", "testFile05");
TestFile testfile06 = new TestFile("testDirectory", "testFile06");
// set file length in different order to file names
long length = 1234567890;
    testfile01.setLength(length + 3L * Integer.MAX_VALUE);
    testfile02.setLength(length + Integer.MAX_VALUE);
    testfile03.setLength(length + 2L * Integer.MAX_VALUE);
    testfile04.setLength(length + 4L * Integer.MAX_VALUE);
    testfile05.setLength(length + 2L * Integer.MAX_VALUE);
testfile06.setLength(length + 0);
TestFile testDir = new TestFile("", "testDirectory");
testDir.setIsDir(true);
testDir.addContents(testfile01, testfile02, testfile03, testfile04,
testfile05, testfile06);
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testDir.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-S");
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println("Found 6 items");
inOrder.verify(out).println(testfile04.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile01.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile02.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile06.formatLineMtime(lineFormat));
verifyNoMoreInteractions(out);
}
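  // The same overflow hazard applies to lengths: with gaps of several times
  // Integer.MAX_VALUE, only a long-based comparison such as
  // Long.compare(right.getLen(), left.getLen()) keeps the largest-first order.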
// check access time display (-u option)
@Test
public void processPathDirectoryAtime() throws IOException {
TestFile testfile01 = new TestFile("testDirectory", "testFile01");
TestFile testfile02 = new TestFile("testDirectory", "testFile02");
TestFile testfile03 = new TestFile("testDirectory", "testFile03");
TestFile testfile04 = new TestFile("testDirectory", "testFile04");
TestFile testfile05 = new TestFile("testDirectory", "testFile05");
TestFile testfile06 = new TestFile("testDirectory", "testFile06");
TestFile testDir = new TestFile("", "testDirectory");
testDir.setIsDir(true);
testDir.addContents(testfile01, testfile02, testfile03, testfile04,
testfile05, testfile06);
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testDir.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-u");
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println("Found 6 items");
inOrder.verify(out).println(testfile01.formatLineAtime(lineFormat));
inOrder.verify(out).println(testfile02.formatLineAtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineAtime(lineFormat));
inOrder.verify(out).println(testfile04.formatLineAtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineAtime(lineFormat));
inOrder.verify(out).println(testfile06.formatLineAtime(lineFormat));
verifyNoMoreInteractions(out);
}
// check access time order (-u -t options)
@Test
public void processPathDirOrderAtime() throws IOException {
TestFile testfile01 = new TestFile("testDirectory", "testFile01");
TestFile testfile02 = new TestFile("testDirectory", "testFile02");
TestFile testfile03 = new TestFile("testDirectory", "testFile03");
TestFile testfile04 = new TestFile("testDirectory", "testFile04");
TestFile testfile05 = new TestFile("testDirectory", "testFile05");
TestFile testfile06 = new TestFile("testDirectory", "testFile06");
// set file atime in different order to file names
testfile01.setAtime(NOW.getTime() + 10);
testfile02.setAtime(NOW.getTime() + 30);
testfile03.setAtime(NOW.getTime() + 20);
testfile04.setAtime(NOW.getTime() + 60);
testfile05.setAtime(NOW.getTime() + 50);
testfile06.setAtime(NOW.getTime() + 40);
// set file mtime in different order to atime
testfile01.setMtime(NOW.getTime() + 60);
testfile02.setMtime(NOW.getTime() + 50);
testfile03.setMtime(NOW.getTime() + 20);
testfile04.setMtime(NOW.getTime() + 30);
testfile05.setMtime(NOW.getTime() + 10);
testfile06.setMtime(NOW.getTime() + 40);
TestFile testDir = new TestFile("", "testDirectory");
testDir.setIsDir(true);
testDir.addContents(testfile01, testfile02, testfile03, testfile04,
testfile05, testfile06);
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testDir.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-t");
options.add("-u");
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println("Found 6 items");
inOrder.verify(out).println(testfile04.formatLineAtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineAtime(lineFormat));
inOrder.verify(out).println(testfile06.formatLineAtime(lineFormat));
inOrder.verify(out).println(testfile02.formatLineAtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineAtime(lineFormat));
inOrder.verify(out).println(testfile01.formatLineAtime(lineFormat));
verifyNoMoreInteractions(out);
}
// check reverse access time order (-u -t -r options)
@Test
public void processPathDirOrderAtimeReverse() throws IOException {
TestFile testfile01 = new TestFile("testDirectory", "testFile01");
TestFile testfile02 = new TestFile("testDirectory", "testFile02");
TestFile testfile03 = new TestFile("testDirectory", "testFile03");
TestFile testfile04 = new TestFile("testDirectory", "testFile04");
TestFile testfile05 = new TestFile("testDirectory", "testFile05");
TestFile testfile06 = new TestFile("testDirectory", "testFile06");
// set file atime in different order to file names
testfile01.setAtime(NOW.getTime() + 10);
testfile02.setAtime(NOW.getTime() + 30);
testfile03.setAtime(NOW.getTime() + 20);
testfile04.setAtime(NOW.getTime() + 60);
testfile05.setAtime(NOW.getTime() + 50);
testfile06.setAtime(NOW.getTime() + 40);
// set file mtime in different order to atime
testfile01.setMtime(NOW.getTime() + 60);
testfile02.setMtime(NOW.getTime() + 50);
testfile03.setMtime(NOW.getTime() + 20);
testfile04.setMtime(NOW.getTime() + 30);
testfile05.setMtime(NOW.getTime() + 10);
testfile06.setMtime(NOW.getTime() + 40);
TestFile testDir = new TestFile("", "testDirectory");
testDir.setIsDir(true);
testDir.addContents(testfile01, testfile02, testfile03, testfile04,
testfile05, testfile06);
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testDir.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-t");
options.add("-u");
options.add("-r");
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println("Found 6 items");
inOrder.verify(out).println(testfile01.formatLineAtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineAtime(lineFormat));
inOrder.verify(out).println(testfile02.formatLineAtime(lineFormat));
inOrder.verify(out).println(testfile06.formatLineAtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineAtime(lineFormat));
inOrder.verify(out).println(testfile04.formatLineAtime(lineFormat));
verifyNoMoreInteractions(out);
}
// check path only display (-C option)
@Test
public void processPathDirectoryPathOnly() throws IOException {
TestFile testfile01 = new TestFile("testDirectory", "testFile01");
TestFile testfile02 = new TestFile("testDirectory", "testFile02");
TestFile testfile03 = new TestFile("testDirectory", "testFile03");
TestFile testfile04 = new TestFile("testDirectory", "testFile04");
TestFile testfile05 = new TestFile("testDirectory", "testFile05");
TestFile testfile06 = new TestFile("testDirectory", "testFile06");
TestFile testDir = new TestFile("", "testDirectory");
testDir.setIsDir(true);
testDir.addContents(testfile01, testfile02, testfile03, testfile04,
testfile05, testfile06);
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testDir.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-C");
ls.processOptions(options);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println(testfile01.getPath().toString());
inOrder.verify(out).println(testfile02.getPath().toString());
inOrder.verify(out).println(testfile03.getPath().toString());
inOrder.verify(out).println(testfile04.getPath().toString());
inOrder.verify(out).println(testfile05.getPath().toString());
inOrder.verify(out).println(testfile06.getPath().toString());
verifyNoMoreInteractions(out);
}
private static void displayWarningOnLocalFileSystem(boolean shouldDisplay)
throws IOException {
Configuration conf = new Configuration();
conf.setBoolean(
HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY, shouldDisplay);
ByteArrayOutputStream buf = new ByteArrayOutputStream();
PrintStream err = new PrintStream(buf, true);
Ls ls = new Ls(conf);
ls.err = err;
ls.run("file:///.");
assertEquals(shouldDisplay, buf.toString().contains(
"Warning: fs.defaultFs is not set when running \"ls\" command."));
}
@Test
public void displayWarningsOnLocalFileSystem() throws IOException {
// Display warnings.
displayWarningOnLocalFileSystem(true);
// Does not display warnings.
displayWarningOnLocalFileSystem(false);
}
// check the deprecated flag isn't set
@Test
public void isDeprecated() {
Ls ls = new Ls();
boolean actual = ls.isDeprecated();
boolean expected = false;
assertEquals("Ls.isDeprecated", expected, actual);
}
// check there's no replacement command
@Test
public void getReplacementCommand() {
Ls ls = new Ls();
String actual = ls.getReplacementCommand();
String expected = null;
assertEquals("Ls.getReplacementCommand", expected, actual);
}
// check the correct name is returned
@Test
public void getName() {
Ls ls = new Ls();
String actual = ls.getName();
String expected = "ls";
assertEquals("Ls.getName", expected, actual);
}
// test class representing a file to be listed
static class TestFile {
private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat(
"yyyy-MM-dd HH:mm");
private static final boolean DEFAULT_ISDIR = false;
private static final String DEFAULT_MODE = "750";
private static final int DEFAULT_REPLICATION = 3;
private static final String DEFAULT_OWNER = "test_owner";
private static final String DEFAULT_GROUP = "test_group";
private static final long DEFAULT_LENGTH = 1234567890L;
private static final long DEFAULT_MTIME = NOW.getTime() - 86400000;
private static final long DEFAULT_ATIME = NOW.getTime() + 86400000;
private static final long DEFAULT_BLOCKSIZE = 64L * 1024 * 1024;
private String dirname;
private String filename;
private boolean isDir;
private FsPermission permission;
private int replication;
private String owner;
private String group;
private long length;
private long mtime;
private long atime;
private long blocksize;
private ArrayList<FileStatus> contents = new ArrayList<FileStatus>();
private Path path = null;
private FileStatus fileStatus = null;
private PathData pathData = null;
public TestFile(String dirname, String filename) {
setDirname(dirname);
setFilename(filename);
setIsDir(DEFAULT_ISDIR);
setPermission(DEFAULT_MODE);
setReplication(DEFAULT_REPLICATION);
setOwner(DEFAULT_OWNER);
setGroup(DEFAULT_GROUP);
setLength(DEFAULT_LENGTH);
setMtime(DEFAULT_MTIME);
setAtime(DEFAULT_ATIME);
setBlocksize(DEFAULT_BLOCKSIZE);
}
public void setDirname(String dirname) {
this.dirname = dirname;
}
public void setFilename(String filename) {
this.filename = filename;
}
public void setIsDir(boolean isDir) {
this.isDir = isDir;
}
public void setPermission(String mode) {
setPermission(new FsPermission(mode));
}
public void setPermission(FsPermission permission) {
this.permission = permission;
}
public void setReplication(int replication) {
this.replication = replication;
}
public void setOwner(String owner) {
this.owner = owner;
}
public void setGroup(String group) {
this.group = group;
}
public void setLength(long length) {
this.length = length;
}
public void setMtime(long mtime) {
this.mtime = mtime;
}
public void setAtime(long atime) {
this.atime = atime;
}
public void setBlocksize(long blocksize) {
this.blocksize = blocksize;
}
public void addContents(TestFile... contents) {
for (TestFile testFile : contents) {
this.contents.add(testFile.getFileStatus());
}
}
private String getDirname() {
return this.dirname;
}
private String getFilename() {
return this.filename;
}
private String getPathname() {
return getDirname() + "/" + getFilename();
}
private boolean isDir() {
return this.isDir;
}
private boolean isFile() {
return !this.isDir();
}
private FsPermission getPermission() {
return this.permission;
}
private int getReplication() {
return this.replication;
}
private String getOwner() {
return this.owner;
}
private String getGroup() {
return this.group;
}
private long getLength() {
return this.length;
}
private long getMtime() {
return this.mtime;
}
private long getAtime() {
return this.atime;
}
private long getBlocksize() {
return this.blocksize;
}
private FileStatus[] getContents() {
return this.contents.toArray(new FileStatus[0]);
}
    /**
     * Returns a formatted output line for this file based on the given
     * format mask, using the modification time as the displayed timestamp.
     *
     * @param lineFormat
     *          format mask
     * @return formatted line
     */
private String formatLineMtime(String lineFormat) {
return String.format(lineFormat, (isDir() ? "d" : "-"), getPermission(),
(isFile() ? getReplication() : "-"), getOwner(), getGroup(),
String.valueOf(getLength()),
DATE_FORMAT.format(new Date(getMtime())), getPathname());
}
    /**
     * Returns a formatted output line for this file based on the given
     * format mask, using the access time as the displayed timestamp.
     *
     * @param lineFormat
     *          format mask
     * @return formatted line
     */
private String formatLineAtime(String lineFormat) {
return String.format(lineFormat, (isDir() ? "d" : "-"), getPermission(),
(isFile() ? getReplication() : "-"), getOwner(), getGroup(),
String.valueOf(getLength()),
DATE_FORMAT.format(new Date(getAtime())), getPathname());
}
public FileStatus getFileStatus() {
if (fileStatus == null) {
Path path = getPath();
fileStatus = new FileStatus(getLength(), isDir(), getReplication(),
getBlocksize(), getMtime(), getAtime(), getPermission(),
getOwner(), getGroup(), path);
}
return fileStatus;
}
public Path getPath() {
if (path == null) {
if ((getDirname() != null) && (!getDirname().equals(""))) {
path = new Path(getDirname(), getFilename());
} else {
path = new Path(getFilename());
}
}
return path;
}
public PathData getPathData() throws IOException {
if (pathData == null) {
FileStatus fileStatus = getFileStatus();
Path path = getPath();
when(mockFs.getFileStatus(eq(path))).thenReturn(fileStatus);
pathData = new PathData(path.toString(), conf);
if (getContents().length != 0) {
when(mockFs.listStatus(eq(path))).thenReturn(getContents());
}
}
return pathData;
}
    /**
     * Compute the format string based on maximum column widths. Copied from
     * Ls.adjustColumnWidths, since these tests are more interested in
     * catching regressions than in asserting an absolute format.
     *
     * @param items
     *          items to find the max field width for each column
     */
public static String computeLineFormat(LinkedList<PathData> items) {
int maxRepl = 3, maxLen = 10, maxOwner = 0, maxGroup = 0;
for (PathData item : items) {
FileStatus stat = item.stat;
maxRepl = maxLength(maxRepl, stat.getReplication());
maxLen = maxLength(maxLen, stat.getLen());
maxOwner = maxLength(maxOwner, stat.getOwner());
maxGroup = maxLength(maxGroup, stat.getGroup());
}
StringBuilder fmt = new StringBuilder();
fmt.append("%s%s "); // permission string
fmt.append("%" + maxRepl + "s ");
      // Do not use '%-0s' as a formatting conversion, since it will throw
      // a MissingFormatWidthException if it is used in String.format().
// http://docs.oracle.com/javase/1.5.0/docs/api/java/util/Formatter.html#intFlags
fmt.append((maxOwner > 0) ? "%-" + maxOwner + "s " : "%s");
fmt.append((maxGroup > 0) ? "%-" + maxGroup + "s " : "%s");
fmt.append("%" + maxLen + "s ");
fmt.append("%s %s"); // mod time & path
return fmt.toString();
}
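    // Illustrative application of the computed mask (values chosen for the
    // sketch): with the defaults above the mask works out to
    //   "%s%s %3s %-10s %-10s %10s %s %s"
    // so String.format(mask, "-", "rwxr-x---", 3, "test_owner", "test_group",
    // "1234567890", "2014-01-01 12:00", "testDirectory/testFile01") yields
    // one aligned listing line of the kind verified above.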
/**
* Return the maximum of two values, treating null as 0
*
* @param n
* integer to be compared
* @param value
* value to be compared
* @return maximum of the two inputs
*/
private static int maxLength(int n, Object value) {
return Math.max(n, (value != null) ? String.valueOf(value).length() : 0);
}
}
static class MockFileSystem extends FilterFileSystem {
Configuration conf;
MockFileSystem() {
super(mockFs);
}
@Override
public void initialize(URI uri, Configuration conf) {
this.conf = conf;
}
@Override
public Path makeQualified(Path path) {
return path;
}
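    // Returning the path unchanged keeps test paths exactly as constructed;
    // qualifying against the stub URI would rewrite every expected string.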
@Override
public Configuration getConf() {
return conf;
}
}
}
| 51,392 | 35.683084 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Shell;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestPathData {
private static final String TEST_ROOT_DIR =
System.getProperty("test.build.data","build/test/data") + "/testPD";
protected Configuration conf;
protected FileSystem fs;
protected Path testDir;
@Before
public void initialize() throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
testDir = new Path(TEST_ROOT_DIR);
// don't want scheme on the path, just an absolute path
testDir = new Path(fs.makeQualified(testDir).toUri().getPath());
fs.mkdirs(testDir);
FileSystem.setDefaultUri(conf, fs.getUri());
fs.setWorkingDirectory(testDir);
fs.mkdirs(new Path("d1"));
fs.createNewFile(new Path("d1", "f1"));
fs.createNewFile(new Path("d1", "f1.1"));
fs.createNewFile(new Path("d1", "f2"));
fs.mkdirs(new Path("d2"));
fs.create(new Path("d2","f3"));
}
@After
public void cleanup() throws Exception {
fs.delete(testDir, true);
fs.close();
}
@Test (timeout = 30000)
public void testWithDirStringAndConf() throws Exception {
String dirString = "d1";
PathData item = new PathData(dirString, conf);
checkPathData(dirString, item);
// properly implementing symlink support in various commands will require
// trailing slashes to be retained
dirString = "d1/";
item = new PathData(dirString, conf);
checkPathData(dirString, item);
}
@Test (timeout = 30000)
public void testUnqualifiedUriContents() throws Exception {
String dirString = "d1";
PathData item = new PathData(dirString, conf);
PathData[] items = item.getDirectoryContents();
assertEquals(
sortedString("d1/f1", "d1/f1.1", "d1/f2"),
sortedString(items)
);
}
@Test (timeout = 30000)
public void testQualifiedUriContents() throws Exception {
String dirString = fs.makeQualified(new Path("d1")).toString();
PathData item = new PathData(dirString, conf);
PathData[] items = item.getDirectoryContents();
assertEquals(
sortedString(dirString+"/f1", dirString+"/f1.1", dirString+"/f2"),
sortedString(items)
);
}
@Test (timeout = 30000)
public void testCwdContents() throws Exception {
String dirString = Path.CUR_DIR;
PathData item = new PathData(dirString, conf);
PathData[] items = item.getDirectoryContents();
assertEquals(
sortedString("d1", "d2"),
sortedString(items)
);
}
@Test (timeout = 30000)
public void testToFile() throws Exception {
PathData item = new PathData(".", conf);
assertEquals(new File(testDir.toString()), item.toFile());
item = new PathData("d1/f1", conf);
assertEquals(new File(testDir + "/d1/f1"), item.toFile());
item = new PathData(testDir + "/d1/f1", conf);
assertEquals(new File(testDir + "/d1/f1"), item.toFile());
}
@Test (timeout = 5000)
public void testToFileRawWindowsPaths() throws Exception {
if (!Path.WINDOWS) {
return;
}
// Can we handle raw Windows paths? The files need not exist for
// these tests to succeed.
String[] winPaths = {
"n:\\",
"N:\\",
"N:\\foo",
"N:\\foo\\bar",
"N:/",
"N:/foo",
"N:/foo/bar"
};
PathData item;
for (String path : winPaths) {
item = new PathData(path, conf);
assertEquals(new File(path), item.toFile());
}
item = new PathData("foo\\bar", conf);
assertEquals(new File(testDir + "\\foo\\bar"), item.toFile());
}
@Test (timeout = 5000)
public void testInvalidWindowsPath() throws Exception {
if (!Path.WINDOWS) {
return;
}
// Verify that the following invalid paths are rejected.
String [] winPaths = {
"N:\\foo/bar"
};
for (String path : winPaths) {
try {
PathData item = new PathData(path, conf);
fail("Did not throw for invalid path " + path);
} catch (IOException ioe) {
}
}
}
@Test (timeout = 30000)
public void testAbsoluteGlob() throws Exception {
PathData[] items = PathData.expandAsGlob(testDir+"/d1/f1*", conf);
assertEquals(
sortedString(testDir+"/d1/f1", testDir+"/d1/f1.1"),
sortedString(items)
);
String absolutePathNoDriveLetter = testDir+"/d1/f1";
if (Shell.WINDOWS) {
// testDir is an absolute path with a drive letter on Windows, i.e.
// c:/some/path
// and for the test we want something like the following
// /some/path
absolutePathNoDriveLetter = absolutePathNoDriveLetter.substring(2);
}
items = PathData.expandAsGlob(absolutePathNoDriveLetter, conf);
assertEquals(
sortedString(absolutePathNoDriveLetter),
sortedString(items)
);
items = PathData.expandAsGlob(".", conf);
assertEquals(
sortedString("."),
sortedString(items)
);
}
@Test (timeout = 30000)
public void testRelativeGlob() throws Exception {
PathData[] items = PathData.expandAsGlob("d1/f1*", conf);
assertEquals(
sortedString("d1/f1", "d1/f1.1"),
sortedString(items)
);
}
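  // These glob tests rely on PathData.expandAsGlob delegating to the
  // filesystem's globStatus, so "d1/f1*" matches both "d1/f1" and "d1/f1.1"
  // relative to the working directory set in initialize().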
@Test (timeout = 30000)
public void testRelativeGlobBack() throws Exception {
fs.setWorkingDirectory(new Path("d1"));
PathData[] items = PathData.expandAsGlob("../d2/*", conf);
assertEquals(
sortedString("../d2/f3"),
sortedString(items)
);
}
@Test (timeout = 30000)
public void testWithStringAndConfForBuggyPath() throws Exception {
String dirString = "file:///tmp";
Path tmpDir = new Path(dirString);
PathData item = new PathData(dirString, conf);
    // This may fail some day if Path is fixed to not crunch the URI when the
    // authority is null. For now we need to test that PathData.toString()
    // returns the given string, while Path.toString() does the crunching.
assertEquals("file:/tmp", tmpDir.toString());
checkPathData(dirString, item);
}
public void checkPathData(String dirString, PathData item) throws Exception {
assertEquals("checking fs", fs, item.fs);
assertEquals("checking string", dirString, item.toString());
assertEquals("checking path",
fs.makeQualified(new Path(item.toString())), item.path
);
assertTrue("checking exist", item.stat != null);
assertTrue("checking isDir", item.stat.isDirectory());
}
  /* JUnit does a lousy job of comparing arrays: if the lengths differ, it
   * just says so without showing the contents. This sorts the paths and
   * builds a string of "i:<value>, ..." entries suitable for a string
   * comparison.
   */
private static String sortedString(Object ... list) {
String[] strings = new String[list.length];
for (int i=0; i < list.length; i++) {
strings[i] = String.valueOf(list[i]);
}
Arrays.sort(strings);
StringBuilder result = new StringBuilder();
for (int i=0; i < strings.length; i++) {
if (result.length() > 0) {
result.append(", ");
}
result.append(i+":<"+strings[i]+">");
}
return result.toString();
}
private static String sortedString(PathData ... items) {
return sortedString((Object[])items);
}
}
| 8,463 | 30.232472 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell;
import static org.junit.Assert.*;
import java.io.File;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.IOException;
import java.io.StringWriter;
import java.net.URI;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
/**
* This class tests the logic for displaying the binary formats supported
* by the Text command.
*/
public class TestTextCommand {
private static final String TEST_ROOT_DIR =
System.getProperty("test.build.data", "build/test/data/") + "/testText";
private static final String AVRO_FILENAME =
new Path(TEST_ROOT_DIR, "weather.avro").toUri().getPath();
private static final String TEXT_FILENAME =
new Path(TEST_ROOT_DIR, "testtextfile.txt").toUri().getPath();
/**
* Tests whether binary Avro data files are displayed correctly.
*/
@Test (timeout = 30000)
public void testDisplayForAvroFiles() throws Exception {
String expectedOutput =
"{\"station\":\"011990-99999\",\"time\":-619524000000,\"temp\":0}" +
System.getProperty("line.separator") +
"{\"station\":\"011990-99999\",\"time\":-619506000000,\"temp\":22}" +
System.getProperty("line.separator") +
"{\"station\":\"011990-99999\",\"time\":-619484400000,\"temp\":-11}" +
System.getProperty("line.separator") +
"{\"station\":\"012650-99999\",\"time\":-655531200000,\"temp\":111}" +
System.getProperty("line.separator") +
"{\"station\":\"012650-99999\",\"time\":-655509600000,\"temp\":78}" +
System.getProperty("line.separator");
String output = readUsingTextCommand(AVRO_FILENAME,
generateWeatherAvroBinaryData());
assertEquals(expectedOutput, output);
}
/**
* Tests that a zero-length file is displayed correctly.
*/
@Test (timeout = 30000)
  public void testEmptyTextFile() throws Exception {
byte[] emptyContents = { };
String output = readUsingTextCommand(TEXT_FILENAME, emptyContents);
assertTrue("".equals(output));
}
/**
* Tests that a one-byte file is displayed correctly.
*/
@Test (timeout = 30000)
  public void testOneByteTextFile() throws Exception {
byte[] oneByteContents = { 'x' };
String output = readUsingTextCommand(TEXT_FILENAME, oneByteContents);
assertTrue(new String(oneByteContents).equals(output));
}
/**
 * Tests that a two-byte file is displayed correctly.
*/
@Test (timeout = 30000)
  public void testTwoByteTextFile() throws Exception {
byte[] twoByteContents = { 'x', 'y' };
String output = readUsingTextCommand(TEXT_FILENAME, twoByteContents);
assertTrue(new String(twoByteContents).equals(output));
}
// Create a file on the local file system and read it using
// the Display.Text class.
private String readUsingTextCommand(String fileName, byte[] fileContents)
throws Exception {
createFile(fileName, fileContents);
    // Prepare and call the Text command's protected getInputStream method
    // through an anonymous subclass that widens its access.
Configuration conf = new Configuration();
URI localPath = new URI(fileName);
PathData pathData = new PathData(localPath, conf);
Display.Text text = new Display.Text() {
@Override
public InputStream getInputStream(PathData item) throws IOException {
return super.getInputStream(item);
}
};
text.setConf(conf);
    InputStream stream = text.getInputStream(pathData);
return inputStreamToString(stream);
}
private String inputStreamToString(InputStream stream) throws IOException {
StringWriter writer = new StringWriter();
IOUtils.copy(stream, writer);
return writer.toString();
}
private void createFile(String fileName, byte[] contents) throws IOException {
    (new File(TEST_ROOT_DIR)).mkdirs(); // create parent directories as needed
File file = new File(fileName);
file.createNewFile();
FileOutputStream stream = new FileOutputStream(file);
stream.write(contents);
stream.close();
}
private byte[] generateWeatherAvroBinaryData() {
// The contents of a simple binary Avro file with weather records.
byte[] contents = {
(byte) 0x4f, (byte) 0x62, (byte) 0x6a, (byte) 0x1,
(byte) 0x4, (byte) 0x14, (byte) 0x61, (byte) 0x76,
(byte) 0x72, (byte) 0x6f, (byte) 0x2e, (byte) 0x63,
(byte) 0x6f, (byte) 0x64, (byte) 0x65, (byte) 0x63,
(byte) 0x8, (byte) 0x6e, (byte) 0x75, (byte) 0x6c,
(byte) 0x6c, (byte) 0x16, (byte) 0x61, (byte) 0x76,
(byte) 0x72, (byte) 0x6f, (byte) 0x2e, (byte) 0x73,
(byte) 0x63, (byte) 0x68, (byte) 0x65, (byte) 0x6d,
(byte) 0x61, (byte) 0xf2, (byte) 0x2, (byte) 0x7b,
(byte) 0x22, (byte) 0x74, (byte) 0x79, (byte) 0x70,
(byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
(byte) 0x72, (byte) 0x65, (byte) 0x63, (byte) 0x6f,
(byte) 0x72, (byte) 0x64, (byte) 0x22, (byte) 0x2c,
(byte) 0x22, (byte) 0x6e, (byte) 0x61, (byte) 0x6d,
(byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
(byte) 0x57, (byte) 0x65, (byte) 0x61, (byte) 0x74,
(byte) 0x68, (byte) 0x65, (byte) 0x72, (byte) 0x22,
(byte) 0x2c, (byte) 0x22, (byte) 0x6e, (byte) 0x61,
(byte) 0x6d, (byte) 0x65, (byte) 0x73, (byte) 0x70,
(byte) 0x61, (byte) 0x63, (byte) 0x65, (byte) 0x22,
(byte) 0x3a, (byte) 0x22, (byte) 0x74, (byte) 0x65,
(byte) 0x73, (byte) 0x74, (byte) 0x22, (byte) 0x2c,
(byte) 0x22, (byte) 0x66, (byte) 0x69, (byte) 0x65,
(byte) 0x6c, (byte) 0x64, (byte) 0x73, (byte) 0x22,
(byte) 0x3a, (byte) 0x5b, (byte) 0x7b, (byte) 0x22,
(byte) 0x6e, (byte) 0x61, (byte) 0x6d, (byte) 0x65,
(byte) 0x22, (byte) 0x3a, (byte) 0x22, (byte) 0x73,
(byte) 0x74, (byte) 0x61, (byte) 0x74, (byte) 0x69,
(byte) 0x6f, (byte) 0x6e, (byte) 0x22, (byte) 0x2c,
(byte) 0x22, (byte) 0x74, (byte) 0x79, (byte) 0x70,
(byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
(byte) 0x73, (byte) 0x74, (byte) 0x72, (byte) 0x69,
(byte) 0x6e, (byte) 0x67, (byte) 0x22, (byte) 0x7d,
(byte) 0x2c, (byte) 0x7b, (byte) 0x22, (byte) 0x6e,
(byte) 0x61, (byte) 0x6d, (byte) 0x65, (byte) 0x22,
(byte) 0x3a, (byte) 0x22, (byte) 0x74, (byte) 0x69,
(byte) 0x6d, (byte) 0x65, (byte) 0x22, (byte) 0x2c,
(byte) 0x22, (byte) 0x74, (byte) 0x79, (byte) 0x70,
(byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
(byte) 0x6c, (byte) 0x6f, (byte) 0x6e, (byte) 0x67,
(byte) 0x22, (byte) 0x7d, (byte) 0x2c, (byte) 0x7b,
(byte) 0x22, (byte) 0x6e, (byte) 0x61, (byte) 0x6d,
(byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
(byte) 0x74, (byte) 0x65, (byte) 0x6d, (byte) 0x70,
(byte) 0x22, (byte) 0x2c, (byte) 0x22, (byte) 0x74,
(byte) 0x79, (byte) 0x70, (byte) 0x65, (byte) 0x22,
(byte) 0x3a, (byte) 0x22, (byte) 0x69, (byte) 0x6e,
(byte) 0x74, (byte) 0x22, (byte) 0x7d, (byte) 0x5d,
(byte) 0x2c, (byte) 0x22, (byte) 0x64, (byte) 0x6f,
(byte) 0x63, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
(byte) 0x41, (byte) 0x20, (byte) 0x77, (byte) 0x65,
(byte) 0x61, (byte) 0x74, (byte) 0x68, (byte) 0x65,
(byte) 0x72, (byte) 0x20, (byte) 0x72, (byte) 0x65,
(byte) 0x61, (byte) 0x64, (byte) 0x69, (byte) 0x6e,
(byte) 0x67, (byte) 0x2e, (byte) 0x22, (byte) 0x7d,
(byte) 0x0, (byte) 0xb0, (byte) 0x81, (byte) 0xb3,
(byte) 0xc4, (byte) 0xa, (byte) 0xc, (byte) 0xf6,
(byte) 0x62, (byte) 0xfa, (byte) 0xc9, (byte) 0x38,
(byte) 0xfd, (byte) 0x7e, (byte) 0x52, (byte) 0x0,
(byte) 0xa7, (byte) 0xa, (byte) 0xcc, (byte) 0x1,
(byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x31,
(byte) 0x39, (byte) 0x39, (byte) 0x30, (byte) 0x2d,
(byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
(byte) 0x39, (byte) 0xff, (byte) 0xa3, (byte) 0x90,
(byte) 0xe8, (byte) 0x87, (byte) 0x24, (byte) 0x0,
(byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x31,
(byte) 0x39, (byte) 0x39, (byte) 0x30, (byte) 0x2d,
(byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
(byte) 0x39, (byte) 0xff, (byte) 0x81, (byte) 0xfb,
(byte) 0xd6, (byte) 0x87, (byte) 0x24, (byte) 0x2c,
(byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x31,
(byte) 0x39, (byte) 0x39, (byte) 0x30, (byte) 0x2d,
(byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
(byte) 0x39, (byte) 0xff, (byte) 0xa5, (byte) 0xae,
(byte) 0xc2, (byte) 0x87, (byte) 0x24, (byte) 0x15,
(byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x32,
(byte) 0x36, (byte) 0x35, (byte) 0x30, (byte) 0x2d,
(byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
(byte) 0x39, (byte) 0xff, (byte) 0xb7, (byte) 0xa2,
(byte) 0x8b, (byte) 0x94, (byte) 0x26, (byte) 0xde,
(byte) 0x1, (byte) 0x18, (byte) 0x30, (byte) 0x31,
(byte) 0x32, (byte) 0x36, (byte) 0x35, (byte) 0x30,
(byte) 0x2d, (byte) 0x39, (byte) 0x39, (byte) 0x39,
(byte) 0x39, (byte) 0x39, (byte) 0xff, (byte) 0xdb,
(byte) 0xd5, (byte) 0xf6, (byte) 0x93, (byte) 0x26,
(byte) 0x9c, (byte) 0x1, (byte) 0xb0, (byte) 0x81,
(byte) 0xb3, (byte) 0xc4, (byte) 0xa, (byte) 0xc,
(byte) 0xf6, (byte) 0x62, (byte) 0xfa, (byte) 0xc9,
(byte) 0x38, (byte) 0xfd, (byte) 0x7e, (byte) 0x52,
(byte) 0x0, (byte) 0xa7,
};
return contents;
}
}
| 10,295 | 42.627119 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.shell.CopyCommands.Put;
import org.apache.hadoop.util.Progressable;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.stubbing.OngoingStubbing;
public class TestCopy {
static Configuration conf;
static Path path = new Path("mockfs:/file");
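  // CopyCommands.Put writes to a temporary "<target>._COPYING_" file and
  // renames it into place on success; on failure only the temporary file is
  // deleted, leaving any existing destination untouched (see the tests below).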
static Path tmpPath = new Path("mockfs:/file._COPYING_");
static Put cmd;
static FileSystem mockFs;
static PathData target;
static FileStatus fileStat;
@BeforeClass
public static void setup() throws IOException {
conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
mockFs = mock(FileSystem.class);
fileStat = mock(FileStatus.class);
when(fileStat.isDirectory()).thenReturn(false);
}
@Before
public void resetMock() throws IOException {
reset(mockFs);
target = new PathData(path.toString(), conf);
cmd = new CopyCommands.Put();
cmd.setConf(conf);
}
@Test
public void testCopyStreamTarget() throws Exception {
FSDataOutputStream out = mock(FSDataOutputStream.class);
whenFsCreate().thenReturn(out);
when(mockFs.getFileStatus(eq(tmpPath))).thenReturn(fileStat);
when(mockFs.rename(eq(tmpPath), eq(path))).thenReturn(true);
FSInputStream in = mock(FSInputStream.class);
when(in.read(any(byte[].class), anyInt(), anyInt())).thenReturn(-1);
tryCopyStream(in, true);
verify(mockFs, never()).delete(eq(path), anyBoolean());
verify(mockFs).rename(eq(tmpPath), eq(path));
verify(mockFs, never()).delete(eq(tmpPath), anyBoolean());
verify(mockFs, never()).close();
}
@Test
public void testCopyStreamTargetExists() throws Exception {
FSDataOutputStream out = mock(FSDataOutputStream.class);
whenFsCreate().thenReturn(out);
when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
target.refreshStatus(); // so it's updated as existing
cmd.setOverwrite(true);
when(mockFs.getFileStatus(eq(tmpPath))).thenReturn(fileStat);
when(mockFs.delete(eq(path), eq(false))).thenReturn(true);
when(mockFs.rename(eq(tmpPath), eq(path))).thenReturn(true);
FSInputStream in = mock(FSInputStream.class);
when(in.read(any(byte[].class), anyInt(), anyInt())).thenReturn(-1);
tryCopyStream(in, true);
verify(mockFs).delete(eq(path), anyBoolean());
verify(mockFs).rename(eq(tmpPath), eq(path));
verify(mockFs, never()).delete(eq(tmpPath), anyBoolean());
verify(mockFs, never()).close();
}
@Test
public void testInterruptedCreate() throws Exception {
whenFsCreate().thenThrow(new InterruptedIOException());
when(mockFs.getFileStatus(eq(tmpPath))).thenReturn(fileStat);
FSDataInputStream in = mock(FSDataInputStream.class);
tryCopyStream(in, false);
verify(mockFs).delete(eq(tmpPath), anyBoolean());
verify(mockFs, never()).rename(any(Path.class), any(Path.class));
verify(mockFs, never()).delete(eq(path), anyBoolean());
verify(mockFs, never()).close();
}
@Test
public void testInterruptedCopyBytes() throws Exception {
FSDataOutputStream out = mock(FSDataOutputStream.class);
whenFsCreate().thenReturn(out);
when(mockFs.getFileStatus(eq(tmpPath))).thenReturn(fileStat);
FSInputStream in = mock(FSInputStream.class);
// make IOUtils.copyBytes fail
when(in.read(any(byte[].class), anyInt(), anyInt())).thenThrow(
new InterruptedIOException());
tryCopyStream(in, false);
verify(mockFs).delete(eq(tmpPath), anyBoolean());
verify(mockFs, never()).rename(any(Path.class), any(Path.class));
verify(mockFs, never()).delete(eq(path), anyBoolean());
verify(mockFs, never()).close();
}
@Test
public void testInterruptedRename() throws Exception {
FSDataOutputStream out = mock(FSDataOutputStream.class);
whenFsCreate().thenReturn(out);
when(mockFs.getFileStatus(eq(tmpPath))).thenReturn(fileStat);
when(mockFs.rename(eq(tmpPath), eq(path))).thenThrow(
new InterruptedIOException());
FSInputStream in = mock(FSInputStream.class);
when(in.read(any(byte[].class), anyInt(), anyInt())).thenReturn(-1);
tryCopyStream(in, false);
verify(mockFs).delete(eq(tmpPath), anyBoolean());
verify(mockFs).rename(eq(tmpPath), eq(path));
verify(mockFs, never()).delete(eq(path), anyBoolean());
verify(mockFs, never()).close();
}
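  // Stubs FileSystem.create for the temporary target, accepting any
  // permission, overwrite flag, buffer size, replication, block size and
  // progress callback.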
private OngoingStubbing<FSDataOutputStream> whenFsCreate() throws IOException {
return when(mockFs.create(eq(tmpPath), any(FsPermission.class),
anyBoolean(), anyInt(), anyShort(), anyLong(),
any(Progressable.class)));
}
private void tryCopyStream(InputStream in, boolean shouldPass) {
try {
cmd.copyStreamToTarget(new FSDataInputStream(in), target);
} catch (InterruptedIOException e) {
assertFalse("copy failed", shouldPass);
} catch (Throwable e) {
assertFalse(e.getMessage(), shouldPass);
}
}
static class MockFileSystem extends FilterFileSystem {
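    // Wraps the shared static mock so that "mockfs:" paths resolved through
    // the normal FileSystem factory are served by the Mockito stubs above.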
Configuration conf;
MockFileSystem() {
super(mockFs);
}
@Override
public void initialize(URI uri, Configuration conf) {
this.conf = conf;
}
@Override
public Path makeQualified(Path path) {
return path;
}
@Override
public Configuration getConf() {
return conf;
}
}
}
| 6,793 | 35.138298 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell.find;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedList;
/** Helper methods for the find expression unit tests. */
class TestHelper {
/** Adds an argument string to an expression */
static void addArgument(Expression expr, String arg) {
expr.addArguments(new LinkedList<String>(Collections.singletonList(arg)));
}
/** Converts a command string into a list of arguments. */
static LinkedList<String> getArgs(String cmd) {
return new LinkedList<String>(Arrays.asList(cmd.split(" ")));
}
}
| 1,385 | 37.5 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestResult.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell.find;
import static org.junit.Assert.*;
import org.junit.Test;
public class TestResult {
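  // Result is effectively an (isPass, isDescend) pair. combine(...) ANDs
  // both flags: the combination passes only if both results pass, and
  // descends only if both descend. negate() flips isPass and leaves
  // isDescend unchanged, as the cases below verify.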
// test the PASS value
@Test(timeout = 1000)
public void testPass() {
Result result = Result.PASS;
assertTrue(result.isPass());
assertTrue(result.isDescend());
}
// test the FAIL value
@Test(timeout = 1000)
public void testFail() {
Result result = Result.FAIL;
assertFalse(result.isPass());
assertTrue(result.isDescend());
}
// test the STOP value
@Test(timeout = 1000)
public void testStop() {
Result result = Result.STOP;
assertTrue(result.isPass());
assertFalse(result.isDescend());
}
// test combine method with two PASSes
@Test(timeout = 1000)
public void combinePassPass() {
Result result = Result.PASS.combine(Result.PASS);
assertTrue(result.isPass());
assertTrue(result.isDescend());
}
// test the combine method with a PASS and a FAIL
@Test(timeout = 1000)
public void combinePassFail() {
Result result = Result.PASS.combine(Result.FAIL);
assertFalse(result.isPass());
assertTrue(result.isDescend());
}
// test the combine method with a FAIL and a PASS
@Test(timeout = 1000)
public void combineFailPass() {
Result result = Result.FAIL.combine(Result.PASS);
assertFalse(result.isPass());
assertTrue(result.isDescend());
}
// test the combine method with two FAILs
@Test(timeout = 1000)
public void combineFailFail() {
Result result = Result.FAIL.combine(Result.FAIL);
assertFalse(result.isPass());
assertTrue(result.isDescend());
}
// test the combine method with a PASS and STOP
@Test(timeout = 1000)
public void combinePassStop() {
Result result = Result.PASS.combine(Result.STOP);
assertTrue(result.isPass());
assertFalse(result.isDescend());
}
// test the combine method with a STOP and FAIL
@Test(timeout = 1000)
public void combineStopFail() {
Result result = Result.STOP.combine(Result.FAIL);
assertFalse(result.isPass());
assertFalse(result.isDescend());
}
// test the combine method with a STOP and a PASS
@Test(timeout = 1000)
public void combineStopPass() {
Result result = Result.STOP.combine(Result.PASS);
assertTrue(result.isPass());
assertFalse(result.isDescend());
}
// test the combine method with a FAIL and a STOP
@Test(timeout = 1000)
public void combineFailStop() {
Result result = Result.FAIL.combine(Result.STOP);
assertFalse(result.isPass());
assertFalse(result.isDescend());
}
// test the negation of PASS
@Test(timeout = 1000)
public void negatePass() {
Result result = Result.PASS.negate();
assertFalse(result.isPass());
assertTrue(result.isDescend());
}
// test the negation of FAIL
@Test(timeout = 1000)
public void negateFail() {
Result result = Result.FAIL.negate();
assertTrue(result.isPass());
assertTrue(result.isDescend());
}
// test the negation of STOP
@Test(timeout = 1000)
public void negateStop() {
Result result = Result.STOP.negate();
assertFalse(result.isPass());
assertFalse(result.isDescend());
}
// test equals with two PASSes
@Test(timeout = 1000)
public void equalsPass() {
Result one = Result.PASS;
Result two = Result.PASS.combine(Result.PASS);
assertEquals(one, two);
}
// test equals with two FAILs
@Test(timeout = 1000)
public void equalsFail() {
Result one = Result.FAIL;
Result two = Result.FAIL.combine(Result.FAIL);
assertEquals(one, two);
}
// test equals with two STOPS
@Test(timeout = 1000)
public void equalsStop() {
Result one = Result.STOP;
Result two = Result.STOP.combine(Result.STOP);
assertEquals(one, two);
}
// test all combinations of not equals
@Test(timeout = 1000)
public void notEquals() {
assertFalse(Result.PASS.equals(Result.FAIL));
assertFalse(Result.PASS.equals(Result.STOP));
assertFalse(Result.FAIL.equals(Result.PASS));
assertFalse(Result.FAIL.equals(Result.STOP));
assertFalse(Result.STOP.equals(Result.PASS));
assertFalse(Result.STOP.equals(Result.FAIL));
}
}
| 4,976 | 27.768786 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestPrint0.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell.find;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import java.io.IOException;
import org.apache.hadoop.fs.shell.PathData;
import org.junit.Test;
import java.io.PrintStream;
import org.apache.hadoop.fs.FileSystem;
import org.junit.Before;
public class TestPrint0 {
private FileSystem mockFs;
@Before
public void resetMock() throws IOException {
mockFs = MockFileSystem.setup();
}
// test the full path is printed to stdout with a '\0'
@Test(timeout = 1000)
public void testPrint() throws IOException {
Print.Print0 print = new Print.Print0();
PrintStream out = mock(PrintStream.class);
FindOptions options = new FindOptions();
options.setOut(out);
print.setOptions(options);
String filename = "/one/two/test";
PathData item = new PathData(filename, mockFs.getConf());
assertEquals(Result.PASS, print.apply(item, -1));
verify(out).print(filename + '\0');
verifyNoMoreInteractions(out);
}
}
| 1,826 | 31.052632 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestAnd.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell.find;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import java.io.IOException;
import java.util.Deque;
import java.util.LinkedList;
import org.apache.hadoop.fs.shell.PathData;
import org.junit.Test;
public class TestAnd {
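  // And applies its children in sequence and short-circuits on FAIL: once a
  // child fails, later children are not evaluated. Note that the tests build
  // the child Deque in reverse, adding the first-applied expression last.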
// test all expressions passing
@Test(timeout = 1000)
public void testPass() throws IOException {
And and = new And();
PathData pathData = mock(PathData.class);
Expression first = mock(Expression.class);
when(first.apply(pathData, -1)).thenReturn(Result.PASS);
Expression second = mock(Expression.class);
when(second.apply(pathData, -1)).thenReturn(Result.PASS);
Deque<Expression> children = new LinkedList<Expression>();
children.add(second);
children.add(first);
and.addChildren(children);
assertEquals(Result.PASS, and.apply(pathData, -1));
verify(first).apply(pathData, -1);
verify(second).apply(pathData, -1);
verifyNoMoreInteractions(first);
verifyNoMoreInteractions(second);
}
// test the first expression failing
@Test(timeout = 1000)
public void testFailFirst() throws IOException {
And and = new And();
PathData pathData = mock(PathData.class);
Expression first = mock(Expression.class);
when(first.apply(pathData, -1)).thenReturn(Result.FAIL);
Expression second = mock(Expression.class);
when(second.apply(pathData, -1)).thenReturn(Result.PASS);
Deque<Expression> children = new LinkedList<Expression>();
children.add(second);
children.add(first);
and.addChildren(children);
assertEquals(Result.FAIL, and.apply(pathData, -1));
verify(first).apply(pathData, -1);
verifyNoMoreInteractions(first);
verifyNoMoreInteractions(second);
}
// test the second expression failing
@Test(timeout = 1000)
public void testFailSecond() throws IOException {
And and = new And();
PathData pathData = mock(PathData.class);
Expression first = mock(Expression.class);
when(first.apply(pathData, -1)).thenReturn(Result.PASS);
Expression second = mock(Expression.class);
when(second.apply(pathData, -1)).thenReturn(Result.FAIL);
Deque<Expression> children = new LinkedList<Expression>();
children.add(second);
children.add(first);
and.addChildren(children);
assertEquals(Result.FAIL, and.apply(pathData, -1));
verify(first).apply(pathData, -1);
verify(second).apply(pathData, -1);
verifyNoMoreInteractions(first);
verifyNoMoreInteractions(second);
}
// test both expressions failing
@Test(timeout = 1000)
public void testFailBoth() throws IOException {
And and = new And();
PathData pathData = mock(PathData.class);
Expression first = mock(Expression.class);
when(first.apply(pathData, -1)).thenReturn(Result.FAIL);
Expression second = mock(Expression.class);
when(second.apply(pathData, -1)).thenReturn(Result.FAIL);
Deque<Expression> children = new LinkedList<Expression>();
children.add(second);
children.add(first);
and.addChildren(children);
assertEquals(Result.FAIL, and.apply(pathData, -1));
verify(first).apply(pathData, -1);
verifyNoMoreInteractions(first);
verifyNoMoreInteractions(second);
}
// test the first expression stopping
@Test(timeout = 1000)
public void testStopFirst() throws IOException {
And and = new And();
PathData pathData = mock(PathData.class);
Expression first = mock(Expression.class);
when(first.apply(pathData, -1)).thenReturn(Result.STOP);
Expression second = mock(Expression.class);
when(second.apply(pathData, -1)).thenReturn(Result.PASS);
Deque<Expression> children = new LinkedList<Expression>();
children.add(second);
children.add(first);
and.addChildren(children);
assertEquals(Result.STOP, and.apply(pathData, -1));
verify(first).apply(pathData, -1);
verify(second).apply(pathData, -1);
verifyNoMoreInteractions(first);
verifyNoMoreInteractions(second);
}
// test the second expression stopping
@Test(timeout = 1000)
public void testStopSecond() throws IOException {
And and = new And();
PathData pathData = mock(PathData.class);
Expression first = mock(Expression.class);
when(first.apply(pathData, -1)).thenReturn(Result.PASS);
Expression second = mock(Expression.class);
when(second.apply(pathData, -1)).thenReturn(Result.STOP);
Deque<Expression> children = new LinkedList<Expression>();
children.add(second);
children.add(first);
and.addChildren(children);
assertEquals(Result.STOP, and.apply(pathData, -1));
verify(first).apply(pathData, -1);
verify(second).apply(pathData, -1);
verifyNoMoreInteractions(first);
verifyNoMoreInteractions(second);
}
// test first expression stopping and second failing
@Test(timeout = 1000)
public void testStopFail() throws IOException {
And and = new And();
PathData pathData = mock(PathData.class);
Expression first = mock(Expression.class);
when(first.apply(pathData, -1)).thenReturn(Result.STOP);
Expression second = mock(Expression.class);
when(second.apply(pathData, -1)).thenReturn(Result.FAIL);
Deque<Expression> children = new LinkedList<Expression>();
children.add(second);
children.add(first);
and.addChildren(children);
assertEquals(Result.STOP.combine(Result.FAIL), and.apply(pathData, -1));
verify(first).apply(pathData, -1);
verify(second).apply(pathData, -1);
verifyNoMoreInteractions(first);
verifyNoMoreInteractions(second);
}
// test setOptions is called on child
@Test(timeout = 1000)
public void testSetOptions() throws IOException {
And and = new And();
Expression first = mock(Expression.class);
Expression second = mock(Expression.class);
Deque<Expression> children = new LinkedList<Expression>();
children.add(second);
children.add(first);
and.addChildren(children);
FindOptions options = mock(FindOptions.class);
and.setOptions(options);
verify(first).setOptions(options);
verify(second).setOptions(options);
verifyNoMoreInteractions(first);
verifyNoMoreInteractions(second);
}
// test prepare is called on child
@Test(timeout = 1000)
public void testPrepare() throws IOException {
And and = new And();
Expression first = mock(Expression.class);
Expression second = mock(Expression.class);
Deque<Expression> children = new LinkedList<Expression>();
children.add(second);
children.add(first);
and.addChildren(children);
and.prepare();
verify(first).prepare();
verify(second).prepare();
verifyNoMoreInteractions(first);
verifyNoMoreInteractions(second);
}
// test finish is called on child
@Test(timeout = 1000)
public void testFinish() throws IOException {
And and = new And();
Expression first = mock(Expression.class);
Expression second = mock(Expression.class);
Deque<Expression> children = new LinkedList<Expression>();
children.add(second);
children.add(first);
and.addChildren(children);
and.finish();
verify(first).finish();
verify(second).finish();
verifyNoMoreInteractions(first);
verifyNoMoreInteractions(second);
}
}
| 8,107 | 29.712121 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/MockFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell.find;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.Path;
/**
 * A mock {@link FileSystem} for use with the {@link Find} unit tests.
 * <p>Usage: {@code FileSystem mockFs = MockFileSystem.setup();} methods on
 * the returned mock can then be stubbed out by the test, and its
 * {@link Configuration} can be accessed via {@code mockFs.getConf()}.
 * <p>The following methods are fixed within the class:
 * <ul>
 * <li>{@link FileSystem#initialize(URI, Configuration)} - blank stub</li>
 * <li>{@link FileSystem#makeQualified(Path)} - returns the passed in {@link Path}</li>
 * <li>{@link FileSystem#getWorkingDirectory} - returns {@code new Path("/")}</li>
 * <li>{@link FileSystem#resolvePath(Path)} - returns the passed in {@link Path}</li>
 * </ul>
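 * <p>A minimal usage sketch (the stubbed path and status object are
 * hypothetical):
 * <pre>{@code
 * FileSystem mockFs = MockFileSystem.setup();
 * when(mockFs.getFileStatus(new Path("/f"))).thenReturn(stat);
 * PathData item = new PathData("/f", mockFs.getConf());
 * }</pre>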
*/
class MockFileSystem extends FilterFileSystem {
private static FileSystem mockFs = null;
/** Setup and return the underlying {@link FileSystem} mock */
static FileSystem setup() throws IOException {
if (mockFs == null) {
mockFs = mock(FileSystem.class);
}
reset(mockFs);
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "mockfs:///");
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
when(mockFs.getConf()).thenReturn(conf);
return mockFs;
}
private MockFileSystem() {
super(mockFs);
}
@Override
public void initialize(URI uri, Configuration conf) {
}
@Override
public Path makeQualified(Path path) {
return path;
}
@Override
public FileStatus[] globStatus(Path pathPattern) throws IOException {
return fs.globStatus(pathPattern);
}
@Override
public Path getWorkingDirectory() {
return new Path("/");
}
@Override
public Path resolvePath(final Path p) throws IOException {
return p;
}
}
| 2,866 | 31.954023 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFind.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell.find;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import static org.mockito.Matchers.*;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.shell.PathData;
import org.junit.Before;
import org.junit.Test;
import org.mockito.InOrder;
public class TestFind {
private static FileSystem mockFs;
private static Configuration conf;
@Before
public void setup() throws IOException {
mockFs = MockFileSystem.setup();
conf = mockFs.getConf();
}
// check follow link option is recognized
@Test(timeout = 1000)
public void processOptionsFollowLink() throws IOException {
Find find = new Find();
String args = "-L path";
find.processOptions(getArgs(args));
assertTrue(find.getOptions().isFollowLink());
assertFalse(find.getOptions().isFollowArgLink());
}
// check follow arg link option is recognized
@Test(timeout = 1000)
public void processOptionsFollowArgLink() throws IOException {
Find find = new Find();
String args = "-H path";
find.processOptions(getArgs(args));
assertFalse(find.getOptions().isFollowLink());
assertTrue(find.getOptions().isFollowArgLink());
}
  // check -L takes precedence when both follow options are given
@Test(timeout = 1000)
public void processOptionsFollowLinkFollowArgLink() throws IOException {
Find find = new Find();
String args = "-L -H path";
find.processOptions(getArgs(args));
assertTrue(find.getOptions().isFollowLink());
// follow link option takes precedence over follow arg link
assertFalse(find.getOptions().isFollowArgLink());
}
// check options and expressions are stripped from args leaving paths
@Test(timeout = 1000)
public void processOptionsExpression() throws IOException {
Find find = new Find();
find.setConf(conf);
String paths = "path1 path2 path3";
String args = "-L -H " + paths + " -print -name test";
LinkedList<String> argsList = getArgs(args);
find.processOptions(argsList);
LinkedList<String> pathList = getArgs(paths);
assertEquals(pathList, argsList);
}
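  // The expected strings below come from Expression.toString(), which renders
  // an expression as Name(arguments;children), e.g. "Name(namemask;)" has one
  // argument and no children while "And(;X,Y)" has no arguments and two
  // children.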
// check print is used as the default expression
@Test(timeout = 1000)
public void processOptionsNoExpression() throws IOException {
Find find = new Find();
find.setConf(conf);
String args = "path";
String expected = "Print(;)";
find.processOptions(getArgs(args));
Expression expression = find.getRootExpression();
assertEquals(expected, expression.toString());
}
// check unknown options are rejected
@Test(timeout = 1000)
public void processOptionsUnknown() throws IOException {
Find find = new Find();
find.setConf(conf);
String args = "path -unknown";
try {
find.processOptions(getArgs(args));
fail("Unknown expression not caught");
} catch (IOException e) {
}
}
// check unknown options are rejected when mixed with known options
@Test(timeout = 1000)
public void processOptionsKnownUnknown() throws IOException {
Find find = new Find();
find.setConf(conf);
String args = "path -print -unknown -print";
try {
find.processOptions(getArgs(args));
fail("Unknown expression not caught");
} catch (IOException e) {
}
}
// check no path defaults to current working directory
@Test(timeout = 1000)
public void processOptionsNoPath() throws IOException {
Find find = new Find();
find.setConf(conf);
String args = "-print";
LinkedList<String> argsList = getArgs(args);
find.processOptions(argsList);
assertEquals(Collections.singletonList(Path.CUR_DIR), argsList);
}
// check -name is handled correctly
@Test(timeout = 1000)
public void processOptionsName() throws IOException {
Find find = new Find();
find.setConf(conf);
String args = "path -name namemask";
String expected = "And(;Name(namemask;),Print(;))";
find.processOptions(getArgs(args));
Expression expression = find.getRootExpression();
assertEquals(expected, expression.toString());
}
// check -iname is handled correctly
@Test(timeout = 1000)
public void processOptionsIname() throws IOException {
Find find = new Find();
find.setConf(conf);
String args = "path -iname namemask";
String expected = "And(;Iname-Name(namemask;),Print(;))";
find.processOptions(getArgs(args));
Expression expression = find.getRootExpression();
assertEquals(expected, expression.toString());
}
// check -print is handled correctly
@Test(timeout = 1000)
public void processOptionsPrint() throws IOException {
Find find = new Find();
find.setConf(conf);
String args = "path -print";
String expected = "Print(;)";
find.processOptions(getArgs(args));
Expression expression = find.getRootExpression();
assertEquals(expected, expression.toString());
}
// check -print0 is handled correctly
@Test(timeout = 1000)
public void processOptionsPrint0() throws IOException {
Find find = new Find();
find.setConf(conf);
String args = "path -print0";
String expected = "Print0-Print(;)";
find.processOptions(getArgs(args));
Expression expression = find.getRootExpression();
assertEquals(expected, expression.toString());
}
// check an implicit and is handled correctly
@Test(timeout = 1000)
public void processOptionsNoop() throws IOException {
Find find = new Find();
find.setConf(conf);
String args = "path -name one -name two -print";
String expected = "And(;And(;Name(one;),Name(two;)),Print(;))";
find.processOptions(getArgs(args));
Expression expression = find.getRootExpression();
assertEquals(expected, expression.toString());
}
// check -a is handled correctly
@Test(timeout = 1000)
public void processOptionsA() throws IOException {
Find find = new Find();
find.setConf(conf);
String args = "path -name one -a -name two -a -print";
String expected = "And(;And(;Name(one;),Name(two;)),Print(;))";
find.processOptions(getArgs(args));
Expression expression = find.getRootExpression();
assertEquals(expected, expression.toString());
}
// check -and is handled correctly
@Test(timeout = 1000)
public void processOptionsAnd() throws IOException {
Find find = new Find();
find.setConf(conf);
String args = "path -name one -and -name two -and -print";
String expected = "And(;And(;Name(one;),Name(two;)),Print(;))";
find.processOptions(getArgs(args));
Expression expression = find.getRootExpression();
assertEquals(expected, expression.toString());
}
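  // The traversal tests below share the fixture built by createDirectories()
  // (defined later in this file): item1 holds item1a (which holds item1aa)
  // and item1b; item2, item3 and item4 are top-level entries; item5 holds
  // item5a through item5e, with item5ca under item5c. Inferred from the
  // follow-link assertions below: item5b links back to item5, item5d links
  // to item5ca, and item4 appears to link to item3.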
  // check expressions are called in the correct order (default pre-order
  // traversal)
@Test(timeout = 1000)
public void processArguments() throws IOException {
LinkedList<PathData> items = createDirectories();
Find find = new Find();
find.setConf(conf);
PrintStream out = mock(PrintStream.class);
find.getOptions().setOut(out);
PrintStream err = mock(PrintStream.class);
find.getOptions().setErr(err);
Expression expr = mock(Expression.class);
when(expr.apply((PathData) any(), anyInt())).thenReturn(Result.PASS);
FileStatusChecker fsCheck = mock(FileStatusChecker.class);
Expression test = new TestExpression(expr, fsCheck);
find.setRootExpression(test);
find.processArguments(items);
InOrder inOrder = inOrder(expr);
inOrder.verify(expr).setOptions(find.getOptions());
inOrder.verify(expr).prepare();
inOrder.verify(expr).apply(item1, 0);
inOrder.verify(expr).apply(item1a, 1);
inOrder.verify(expr).apply(item1aa, 2);
inOrder.verify(expr).apply(item1b, 1);
inOrder.verify(expr).apply(item2, 0);
inOrder.verify(expr).apply(item3, 0);
inOrder.verify(expr).apply(item4, 0);
inOrder.verify(expr).apply(item5, 0);
inOrder.verify(expr).apply(item5a, 1);
inOrder.verify(expr).apply(item5b, 1);
inOrder.verify(expr).apply(item5c, 1);
inOrder.verify(expr).apply(item5ca, 2);
inOrder.verify(expr).apply(item5d, 1);
inOrder.verify(expr).apply(item5e, 1);
inOrder.verify(expr).finish();
verifyNoMoreInteractions(expr);
InOrder inOrderFsCheck = inOrder(fsCheck);
inOrderFsCheck.verify(fsCheck).check(item1.stat);
inOrderFsCheck.verify(fsCheck).check(item1a.stat);
inOrderFsCheck.verify(fsCheck).check(item1aa.stat);
inOrderFsCheck.verify(fsCheck).check(item1b.stat);
inOrderFsCheck.verify(fsCheck).check(item2.stat);
inOrderFsCheck.verify(fsCheck).check(item3.stat);
inOrderFsCheck.verify(fsCheck).check(item4.stat);
inOrderFsCheck.verify(fsCheck).check(item5.stat);
inOrderFsCheck.verify(fsCheck).check(item5a.stat);
inOrderFsCheck.verify(fsCheck).check(item5b.stat);
inOrderFsCheck.verify(fsCheck).check(item5c.stat);
inOrderFsCheck.verify(fsCheck).check(item5ca.stat);
inOrderFsCheck.verify(fsCheck).check(item5d.stat);
inOrderFsCheck.verify(fsCheck).check(item5e.stat);
verifyNoMoreInteractions(fsCheck);
verifyNoMoreInteractions(out);
verifyNoMoreInteractions(err);
}
// check that directories are descended correctly when -depth is specified
@Test(timeout = 1000)
public void processArgumentsDepthFirst() throws IOException {
LinkedList<PathData> items = createDirectories();
Find find = new Find();
find.getOptions().setDepthFirst(true);
find.setConf(conf);
PrintStream out = mock(PrintStream.class);
find.getOptions().setOut(out);
PrintStream err = mock(PrintStream.class);
find.getOptions().setErr(err);
Expression expr = mock(Expression.class);
when(expr.apply((PathData) any(), anyInt())).thenReturn(Result.PASS);
FileStatusChecker fsCheck = mock(FileStatusChecker.class);
Expression test = new TestExpression(expr, fsCheck);
find.setRootExpression(test);
find.processArguments(items);
InOrder inOrder = inOrder(expr);
inOrder.verify(expr).setOptions(find.getOptions());
inOrder.verify(expr).prepare();
inOrder.verify(expr).apply(item1aa, 2);
inOrder.verify(expr).apply(item1a, 1);
inOrder.verify(expr).apply(item1b, 1);
inOrder.verify(expr).apply(item1, 0);
inOrder.verify(expr).apply(item2, 0);
inOrder.verify(expr).apply(item3, 0);
inOrder.verify(expr).apply(item4, 0);
inOrder.verify(expr).apply(item5a, 1);
inOrder.verify(expr).apply(item5b, 1);
inOrder.verify(expr).apply(item5ca, 2);
inOrder.verify(expr).apply(item5c, 1);
inOrder.verify(expr).apply(item5d, 1);
inOrder.verify(expr).apply(item5e, 1);
inOrder.verify(expr).apply(item5, 0);
inOrder.verify(expr).finish();
verifyNoMoreInteractions(expr);
InOrder inOrderFsCheck = inOrder(fsCheck);
inOrderFsCheck.verify(fsCheck).check(item1aa.stat);
inOrderFsCheck.verify(fsCheck).check(item1a.stat);
inOrderFsCheck.verify(fsCheck).check(item1b.stat);
inOrderFsCheck.verify(fsCheck).check(item1.stat);
inOrderFsCheck.verify(fsCheck).check(item2.stat);
inOrderFsCheck.verify(fsCheck).check(item3.stat);
inOrderFsCheck.verify(fsCheck).check(item4.stat);
inOrderFsCheck.verify(fsCheck).check(item5a.stat);
inOrderFsCheck.verify(fsCheck).check(item5b.stat);
inOrderFsCheck.verify(fsCheck).check(item5ca.stat);
inOrderFsCheck.verify(fsCheck).check(item5c.stat);
inOrderFsCheck.verify(fsCheck).check(item5d.stat);
inOrderFsCheck.verify(fsCheck).check(item5e.stat);
inOrderFsCheck.verify(fsCheck).check(item5.stat);
verifyNoMoreInteractions(fsCheck);
verifyNoMoreInteractions(out);
verifyNoMoreInteractions(err);
}
// check symlinks given as path arguments are processed correctly with the
// follow arg option set
@Test(timeout = 1000)
public void processArgumentsOptionFollowArg() throws IOException {
LinkedList<PathData> items = createDirectories();
Find find = new Find();
find.getOptions().setFollowArgLink(true);
find.setConf(conf);
PrintStream out = mock(PrintStream.class);
find.getOptions().setOut(out);
PrintStream err = mock(PrintStream.class);
find.getOptions().setErr(err);
Expression expr = mock(Expression.class);
when(expr.apply((PathData) any(), anyInt())).thenReturn(Result.PASS);
FileStatusChecker fsCheck = mock(FileStatusChecker.class);
Expression test = new TestExpression(expr, fsCheck);
find.setRootExpression(test);
find.processArguments(items);
InOrder inOrder = inOrder(expr);
inOrder.verify(expr).setOptions(find.getOptions());
inOrder.verify(expr).prepare();
inOrder.verify(expr).apply(item1, 0);
inOrder.verify(expr).apply(item1a, 1);
inOrder.verify(expr).apply(item1aa, 2);
inOrder.verify(expr).apply(item1b, 1);
inOrder.verify(expr).apply(item2, 0);
inOrder.verify(expr).apply(item3, 0);
inOrder.verify(expr).apply(item4, 0);
inOrder.verify(expr).apply(item5, 0);
inOrder.verify(expr).apply(item5a, 1);
inOrder.verify(expr).apply(item5b, 1);
inOrder.verify(expr).apply(item5c, 1);
inOrder.verify(expr).apply(item5ca, 2);
inOrder.verify(expr).apply(item5d, 1);
inOrder.verify(expr).apply(item5e, 1);
inOrder.verify(expr).finish();
verifyNoMoreInteractions(expr);
InOrder inOrderFsCheck = inOrder(fsCheck);
inOrderFsCheck.verify(fsCheck).check(item1.stat);
inOrderFsCheck.verify(fsCheck).check(item1a.stat);
inOrderFsCheck.verify(fsCheck).check(item1aa.stat);
inOrderFsCheck.verify(fsCheck).check(item1b.stat);
inOrderFsCheck.verify(fsCheck).check(item2.stat);
inOrderFsCheck.verify(fsCheck, times(2)).check(item3.stat);
inOrderFsCheck.verify(fsCheck).check(item5.stat);
inOrderFsCheck.verify(fsCheck).check(item5a.stat);
inOrderFsCheck.verify(fsCheck).check(item5b.stat);
inOrderFsCheck.verify(fsCheck).check(item5c.stat);
inOrderFsCheck.verify(fsCheck).check(item5ca.stat);
inOrderFsCheck.verify(fsCheck).check(item5d.stat);
inOrderFsCheck.verify(fsCheck).check(item5e.stat);
verifyNoMoreInteractions(fsCheck);
verifyNoMoreInteractions(out);
verifyNoMoreInteractions(err);
}
// check symlinks given as path arguments are processed correctly with the
// follow option
@Test(timeout = 1000)
public void processArgumentsOptionFollow() throws IOException {
LinkedList<PathData> items = createDirectories();
Find find = new Find();
find.getOptions().setFollowLink(true);
find.setConf(conf);
PrintStream out = mock(PrintStream.class);
find.getOptions().setOut(out);
PrintStream err = mock(PrintStream.class);
find.getOptions().setErr(err);
Expression expr = mock(Expression.class);
when(expr.apply((PathData) any(), anyInt())).thenReturn(Result.PASS);
FileStatusChecker fsCheck = mock(FileStatusChecker.class);
Expression test = new TestExpression(expr, fsCheck);
find.setRootExpression(test);
find.processArguments(items);
InOrder inOrder = inOrder(expr);
inOrder.verify(expr).setOptions(find.getOptions());
inOrder.verify(expr).prepare();
inOrder.verify(expr).apply(item1, 0);
inOrder.verify(expr).apply(item1a, 1);
inOrder.verify(expr).apply(item1aa, 2);
inOrder.verify(expr).apply(item1b, 1);
inOrder.verify(expr).apply(item2, 0);
inOrder.verify(expr).apply(item3, 0);
inOrder.verify(expr).apply(item4, 0);
inOrder.verify(expr).apply(item5, 0);
inOrder.verify(expr).apply(item5a, 1);
inOrder.verify(expr).apply(item5b, 1); // triggers infinite loop message
inOrder.verify(expr).apply(item5c, 1);
inOrder.verify(expr).apply(item5ca, 2);
inOrder.verify(expr).apply(item5d, 1);
inOrder.verify(expr).apply(item5ca, 2); // following item5d symlink
inOrder.verify(expr).apply(item5e, 1);
inOrder.verify(expr).finish();
verifyNoMoreInteractions(expr);
InOrder inOrderFsCheck = inOrder(fsCheck);
inOrderFsCheck.verify(fsCheck).check(item1.stat);
inOrderFsCheck.verify(fsCheck).check(item1a.stat);
inOrderFsCheck.verify(fsCheck).check(item1aa.stat);
inOrderFsCheck.verify(fsCheck).check(item1b.stat);
inOrderFsCheck.verify(fsCheck).check(item2.stat);
inOrderFsCheck.verify(fsCheck, times(2)).check(item3.stat);
inOrderFsCheck.verify(fsCheck).check(item5.stat);
inOrderFsCheck.verify(fsCheck).check(item1b.stat);
inOrderFsCheck.verify(fsCheck).check(item5.stat);
inOrderFsCheck.verify(fsCheck).check(item5c.stat);
inOrderFsCheck.verify(fsCheck).check(item5ca.stat);
inOrderFsCheck.verify(fsCheck).check(item5c.stat);
inOrderFsCheck.verify(fsCheck, times(2)).check(item5ca.stat);
verifyNoMoreInteractions(fsCheck);
verifyNoMoreInteractions(out);
verify(err).println(
"Infinite loop ignored: " + item5b.toString() + " -> "
+ item5.toString());
verifyNoMoreInteractions(err);
}
  // check minimum depth is handled
@Test(timeout = 1000)
public void processArgumentsMinDepth() throws IOException {
LinkedList<PathData> items = createDirectories();
Find find = new Find();
find.getOptions().setMinDepth(1);
find.setConf(conf);
PrintStream out = mock(PrintStream.class);
find.getOptions().setOut(out);
PrintStream err = mock(PrintStream.class);
find.getOptions().setErr(err);
Expression expr = mock(Expression.class);
when(expr.apply((PathData) any(), anyInt())).thenReturn(Result.PASS);
FileStatusChecker fsCheck = mock(FileStatusChecker.class);
Expression test = new TestExpression(expr, fsCheck);
find.setRootExpression(test);
find.processArguments(items);
InOrder inOrder = inOrder(expr);
inOrder.verify(expr).setOptions(find.getOptions());
inOrder.verify(expr).prepare();
inOrder.verify(expr).apply(item1a, 1);
inOrder.verify(expr).apply(item1aa, 2);
inOrder.verify(expr).apply(item1b, 1);
inOrder.verify(expr).apply(item5a, 1);
inOrder.verify(expr).apply(item5b, 1);
inOrder.verify(expr).apply(item5c, 1);
inOrder.verify(expr).apply(item5ca, 2);
inOrder.verify(expr).apply(item5d, 1);
inOrder.verify(expr).apply(item5e, 1);
inOrder.verify(expr).finish();
verifyNoMoreInteractions(expr);
InOrder inOrderFsCheck = inOrder(fsCheck);
inOrderFsCheck.verify(fsCheck).check(item1a.stat);
inOrderFsCheck.verify(fsCheck).check(item1aa.stat);
inOrderFsCheck.verify(fsCheck).check(item1b.stat);
inOrderFsCheck.verify(fsCheck).check(item5a.stat);
inOrderFsCheck.verify(fsCheck).check(item5b.stat);
inOrderFsCheck.verify(fsCheck).check(item5c.stat);
inOrderFsCheck.verify(fsCheck).check(item5ca.stat);
inOrderFsCheck.verify(fsCheck).check(item5d.stat);
inOrderFsCheck.verify(fsCheck).check(item5e.stat);
verifyNoMoreInteractions(fsCheck);
verifyNoMoreInteractions(out);
verifyNoMoreInteractions(err);
}
// check maximum depth is handled
@Test(timeout = 1000)
public void processArgumentsMaxDepth() throws IOException {
LinkedList<PathData> items = createDirectories();
Find find = new Find();
find.getOptions().setMaxDepth(1);
find.setConf(conf);
PrintStream out = mock(PrintStream.class);
find.getOptions().setOut(out);
PrintStream err = mock(PrintStream.class);
find.getOptions().setErr(err);
Expression expr = mock(Expression.class);
when(expr.apply((PathData) any(), anyInt())).thenReturn(Result.PASS);
FileStatusChecker fsCheck = mock(FileStatusChecker.class);
Expression test = new TestExpression(expr, fsCheck);
find.setRootExpression(test);
find.processArguments(items);
InOrder inOrder = inOrder(expr);
inOrder.verify(expr).setOptions(find.getOptions());
inOrder.verify(expr).prepare();
inOrder.verify(expr).apply(item1, 0);
inOrder.verify(expr).apply(item1a, 1);
inOrder.verify(expr).apply(item1b, 1);
inOrder.verify(expr).apply(item2, 0);
inOrder.verify(expr).apply(item3, 0);
inOrder.verify(expr).apply(item4, 0);
inOrder.verify(expr).apply(item5, 0);
inOrder.verify(expr).apply(item5a, 1);
inOrder.verify(expr).apply(item5b, 1);
inOrder.verify(expr).apply(item5c, 1);
inOrder.verify(expr).apply(item5d, 1);
inOrder.verify(expr).apply(item5e, 1);
inOrder.verify(expr).finish();
verifyNoMoreInteractions(expr);
InOrder inOrderFsCheck = inOrder(fsCheck);
inOrderFsCheck.verify(fsCheck).check(item1.stat);
inOrderFsCheck.verify(fsCheck).check(item1a.stat);
inOrderFsCheck.verify(fsCheck).check(item1b.stat);
inOrderFsCheck.verify(fsCheck).check(item2.stat);
inOrderFsCheck.verify(fsCheck).check(item3.stat);
inOrderFsCheck.verify(fsCheck).check(item4.stat);
inOrderFsCheck.verify(fsCheck).check(item5.stat);
inOrderFsCheck.verify(fsCheck).check(item5a.stat);
inOrderFsCheck.verify(fsCheck).check(item5b.stat);
inOrderFsCheck.verify(fsCheck).check(item5c.stat);
inOrderFsCheck.verify(fsCheck).check(item5d.stat);
inOrderFsCheck.verify(fsCheck).check(item5e.stat);
verifyNoMoreInteractions(fsCheck);
verifyNoMoreInteractions(out);
verifyNoMoreInteractions(err);
}
// check min depth is handled when -depth is specified
@Test(timeout = 1000)
public void processArgumentsDepthFirstMinDepth() throws IOException {
LinkedList<PathData> items = createDirectories();
Find find = new Find();
find.getOptions().setDepthFirst(true);
find.getOptions().setMinDepth(1);
find.setConf(conf);
PrintStream out = mock(PrintStream.class);
find.getOptions().setOut(out);
PrintStream err = mock(PrintStream.class);
find.getOptions().setErr(err);
Expression expr = mock(Expression.class);
when(expr.apply((PathData) any(), anyInt())).thenReturn(Result.PASS);
FileStatusChecker fsCheck = mock(FileStatusChecker.class);
Expression test = new TestExpression(expr, fsCheck);
find.setRootExpression(test);
find.processArguments(items);
InOrder inOrder = inOrder(expr);
inOrder.verify(expr).setOptions(find.getOptions());
inOrder.verify(expr).prepare();
inOrder.verify(expr).apply(item1aa, 2);
inOrder.verify(expr).apply(item1a, 1);
inOrder.verify(expr).apply(item1b, 1);
inOrder.verify(expr).apply(item5a, 1);
inOrder.verify(expr).apply(item5b, 1);
inOrder.verify(expr).apply(item5ca, 2);
inOrder.verify(expr).apply(item5c, 1);
inOrder.verify(expr).apply(item5d, 1);
inOrder.verify(expr).apply(item5e, 1);
inOrder.verify(expr).finish();
verifyNoMoreInteractions(expr);
InOrder inOrderFsCheck = inOrder(fsCheck);
inOrderFsCheck.verify(fsCheck).check(item1aa.stat);
inOrderFsCheck.verify(fsCheck).check(item1a.stat);
inOrderFsCheck.verify(fsCheck).check(item1b.stat);
inOrderFsCheck.verify(fsCheck).check(item5a.stat);
inOrderFsCheck.verify(fsCheck).check(item5b.stat);
inOrderFsCheck.verify(fsCheck).check(item5ca.stat);
inOrderFsCheck.verify(fsCheck).check(item5c.stat);
inOrderFsCheck.verify(fsCheck).check(item5d.stat);
inOrderFsCheck.verify(fsCheck).check(item5e.stat);
verifyNoMoreInteractions(fsCheck);
verifyNoMoreInteractions(out);
verifyNoMoreInteractions(err);
}
// check max depth is handled when -depth is specified
@Test(timeout = 1000)
public void processArgumentsDepthFirstMaxDepth() throws IOException {
LinkedList<PathData> items = createDirectories();
Find find = new Find();
find.getOptions().setDepthFirst(true);
find.getOptions().setMaxDepth(1);
find.setConf(conf);
PrintStream out = mock(PrintStream.class);
find.getOptions().setOut(out);
PrintStream err = mock(PrintStream.class);
find.getOptions().setErr(err);
Expression expr = mock(Expression.class);
when(expr.apply((PathData) any(), anyInt())).thenReturn(Result.PASS);
FileStatusChecker fsCheck = mock(FileStatusChecker.class);
Expression test = new TestExpression(expr, fsCheck);
find.setRootExpression(test);
find.processArguments(items);
InOrder inOrder = inOrder(expr);
inOrder.verify(expr).setOptions(find.getOptions());
inOrder.verify(expr).prepare();
inOrder.verify(expr).apply(item1a, 1);
inOrder.verify(expr).apply(item1b, 1);
inOrder.verify(expr).apply(item1, 0);
inOrder.verify(expr).apply(item2, 0);
inOrder.verify(expr).apply(item3, 0);
inOrder.verify(expr).apply(item4, 0);
inOrder.verify(expr).apply(item5a, 1);
inOrder.verify(expr).apply(item5b, 1);
inOrder.verify(expr).apply(item5c, 1);
inOrder.verify(expr).apply(item5d, 1);
inOrder.verify(expr).apply(item5e, 1);
inOrder.verify(expr).apply(item5, 0);
inOrder.verify(expr).finish();
verifyNoMoreInteractions(expr);
InOrder inOrderFsCheck = inOrder(fsCheck);
inOrderFsCheck.verify(fsCheck).check(item1a.stat);
inOrderFsCheck.verify(fsCheck).check(item1b.stat);
inOrderFsCheck.verify(fsCheck).check(item1.stat);
inOrderFsCheck.verify(fsCheck).check(item2.stat);
inOrderFsCheck.verify(fsCheck).check(item3.stat);
inOrderFsCheck.verify(fsCheck).check(item4.stat);
inOrderFsCheck.verify(fsCheck).check(item5a.stat);
inOrderFsCheck.verify(fsCheck).check(item5b.stat);
inOrderFsCheck.verify(fsCheck).check(item5c.stat);
inOrderFsCheck.verify(fsCheck).check(item5d.stat);
inOrderFsCheck.verify(fsCheck).check(item5e.stat);
inOrderFsCheck.verify(fsCheck).check(item5.stat);
verifyNoMoreInteractions(fsCheck);
verifyNoMoreInteractions(out);
verifyNoMoreInteractions(err);
}
// check that a STOP result from the expression stops descent into a directory
@Test(timeout = 1000)
public void processArgumentsNoDescend() throws IOException {
LinkedList<PathData> items = createDirectories();
Find find = new Find();
find.setConf(conf);
PrintStream out = mock(PrintStream.class);
find.getOptions().setOut(out);
PrintStream err = mock(PrintStream.class);
find.getOptions().setErr(err);
Expression expr = mock(Expression.class);
when(expr.apply((PathData) any(), anyInt())).thenReturn(Result.PASS);
when(expr.apply(eq(item1a), anyInt())).thenReturn(Result.STOP);
FileStatusChecker fsCheck = mock(FileStatusChecker.class);
Expression test = new TestExpression(expr, fsCheck);
find.setRootExpression(test);
find.processArguments(items);
InOrder inOrder = inOrder(expr);
inOrder.verify(expr).setOptions(find.getOptions());
inOrder.verify(expr).prepare();
inOrder.verify(expr).apply(item1, 0);
inOrder.verify(expr).apply(item1a, 1);
inOrder.verify(expr).apply(item1b, 1);
inOrder.verify(expr).apply(item2, 0);
inOrder.verify(expr).apply(item3, 0);
inOrder.verify(expr).apply(item4, 0);
inOrder.verify(expr).apply(item5, 0);
inOrder.verify(expr).apply(item5a, 1);
inOrder.verify(expr).apply(item5b, 1);
inOrder.verify(expr).apply(item5c, 1);
inOrder.verify(expr).apply(item5ca, 2);
inOrder.verify(expr).apply(item5d, 1);
inOrder.verify(expr).apply(item5e, 1);
inOrder.verify(expr).finish();
verifyNoMoreInteractions(expr);
InOrder inOrderFsCheck = inOrder(fsCheck);
inOrderFsCheck.verify(fsCheck).check(item1.stat);
inOrderFsCheck.verify(fsCheck).check(item1a.stat);
inOrderFsCheck.verify(fsCheck).check(item1b.stat);
inOrderFsCheck.verify(fsCheck).check(item2.stat);
inOrderFsCheck.verify(fsCheck).check(item3.stat);
inOrderFsCheck.verify(fsCheck).check(item4.stat);
inOrderFsCheck.verify(fsCheck).check(item5.stat);
inOrderFsCheck.verify(fsCheck).check(item5a.stat);
inOrderFsCheck.verify(fsCheck).check(item5b.stat);
inOrderFsCheck.verify(fsCheck).check(item5c.stat);
inOrderFsCheck.verify(fsCheck).check(item5ca.stat);
inOrderFsCheck.verify(fsCheck).check(item5d.stat);
inOrderFsCheck.verify(fsCheck).check(item5e.stat);
verifyNoMoreInteractions(fsCheck);
verifyNoMoreInteractions(out);
verifyNoMoreInteractions(err);
}
private interface FileStatusChecker {
void check(FileStatus fileStatus);
}
private class TestExpression extends BaseExpression implements Expression {
private Expression expr;
private FileStatusChecker checker;
public TestExpression(Expression expr, FileStatusChecker checker) {
this.expr = expr;
this.checker = checker;
}
@Override
public Result apply(PathData item, int depth) throws IOException {
FileStatus fileStatus = getFileStatus(item, depth);
checker.check(fileStatus);
return expr.apply(item, depth);
}
@Override
public void setOptions(FindOptions options) throws IOException {
super.setOptions(options);
expr.setOptions(options);
}
@Override
public void prepare() throws IOException {
expr.prepare();
}
@Override
public void finish() throws IOException {
expr.finish();
}
}
  // creates a directory structure for traversal
  // item1 (directory)
  //   \- item1a (directory)
  //        \- item1aa (file)
  //   \- item1b (file)
  // item2 (directory)
  // item3 (file)
  // item4 (link) -> item3
  // item5 (directory)
  //   \- item5a (link) -> item1b
  //   \- item5b (link) -> item5 (infinite loop)
  //   \- item5c (directory)
  //        \- item5ca (file)
  //   \- item5d (link) -> item5c
  //   \- item5e (link) -> item5c/item5ca
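  // The tests above walk this tree in both traversal modes: by default a
  // directory is applied before its children (item1 before item1a), while
  // with -depth (depth-first) children come first (item1aa, then item1a,
  // then item1).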
private PathData item1 = null;
private PathData item1a = null;
private PathData item1aa = null;
private PathData item1b = null;
private PathData item2 = null;
private PathData item3 = null;
private PathData item4 = null;
private PathData item5 = null;
private PathData item5a = null;
private PathData item5b = null;
private PathData item5c = null;
private PathData item5ca = null;
private PathData item5d = null;
private PathData item5e = null;
private LinkedList<PathData> createDirectories() throws IOException {
item1 = createPathData("item1");
item1a = createPathData("item1/item1a");
item1aa = createPathData("item1/item1a/item1aa");
item1b = createPathData("item1/item1b");
item2 = createPathData("item2");
item3 = createPathData("item3");
item4 = createPathData("item4");
item5 = createPathData("item5");
item5a = createPathData("item5/item5a");
item5b = createPathData("item5/item5b");
item5c = createPathData("item5/item5c");
item5ca = createPathData("item5/item5c/item5ca");
item5d = createPathData("item5/item5d");
item5e = createPathData("item5/item5e");
LinkedList<PathData> args = new LinkedList<PathData>();
when(item1.stat.isDirectory()).thenReturn(true);
when(item1a.stat.isDirectory()).thenReturn(true);
when(item1aa.stat.isDirectory()).thenReturn(false);
when(item1b.stat.isDirectory()).thenReturn(false);
when(item2.stat.isDirectory()).thenReturn(true);
when(item3.stat.isDirectory()).thenReturn(false);
when(item4.stat.isDirectory()).thenReturn(false);
when(item5.stat.isDirectory()).thenReturn(true);
when(item5a.stat.isDirectory()).thenReturn(false);
when(item5b.stat.isDirectory()).thenReturn(false);
when(item5c.stat.isDirectory()).thenReturn(true);
when(item5ca.stat.isDirectory()).thenReturn(false);
when(item5d.stat.isDirectory()).thenReturn(false);
when(item5e.stat.isDirectory()).thenReturn(false);
when(mockFs.listStatus(eq(item1.path))).thenReturn(
new FileStatus[] { item1a.stat, item1b.stat });
when(mockFs.listStatus(eq(item1a.path))).thenReturn(
new FileStatus[] { item1aa.stat });
when(mockFs.listStatus(eq(item2.path))).thenReturn(new FileStatus[0]);
when(mockFs.listStatus(eq(item5.path))).thenReturn(
new FileStatus[] { item5a.stat, item5b.stat, item5c.stat, item5d.stat,
item5e.stat });
when(mockFs.listStatus(eq(item5c.path))).thenReturn(
new FileStatus[] { item5ca.stat });
when(item1.stat.isSymlink()).thenReturn(false);
when(item1a.stat.isSymlink()).thenReturn(false);
when(item1aa.stat.isSymlink()).thenReturn(false);
when(item1b.stat.isSymlink()).thenReturn(false);
when(item2.stat.isSymlink()).thenReturn(false);
when(item3.stat.isSymlink()).thenReturn(false);
when(item4.stat.isSymlink()).thenReturn(true);
when(item5.stat.isSymlink()).thenReturn(false);
when(item5a.stat.isSymlink()).thenReturn(true);
when(item5b.stat.isSymlink()).thenReturn(true);
when(item5d.stat.isSymlink()).thenReturn(true);
when(item5e.stat.isSymlink()).thenReturn(true);
when(item4.stat.getSymlink()).thenReturn(item3.path);
when(item5a.stat.getSymlink()).thenReturn(item1b.path);
when(item5b.stat.getSymlink()).thenReturn(item5.path);
when(item5d.stat.getSymlink()).thenReturn(item5c.path);
when(item5e.stat.getSymlink()).thenReturn(item5ca.path);
args.add(item1);
args.add(item2);
args.add(item3);
args.add(item4);
args.add(item5);
return args;
}
private PathData createPathData(String name) throws IOException {
Path path = new Path(name);
FileStatus fstat = mock(FileStatus.class);
when(fstat.getPath()).thenReturn(path);
when(fstat.toString()).thenReturn("fileStatus:" + name);
when(mockFs.getFileStatus(eq(path))).thenReturn(fstat);
PathData item = new PathData(path.toString(), conf);
return item;
}
private LinkedList<String> getArgs(String cmd) {
return new LinkedList<String>(Arrays.asList(cmd.split(" ")));
}
}
| 34,709 | 37.523862 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFilterExpression.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell.find;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import java.io.IOException;
import java.util.Deque;
import org.apache.hadoop.fs.shell.PathData;
import org.junit.Before;
import org.junit.Test;
public class TestFilterExpression {
private Expression expr;
private FilterExpression test;
@Before
public void setup() {
expr = mock(Expression.class);
test = new FilterExpression(expr) {
};
}
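  // Note: FilterExpression is abstract, so the empty anonymous subclass above
  // is just a way to get a concrete instance; every call the tests make is
  // delegated straight to the mocked child expression.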
// test that the child expression is correctly set
@Test(timeout = 1000)
public void expression() throws IOException {
assertEquals(expr, test.expression);
}
// test that setOptions method is called
@Test(timeout = 1000)
public void setOptions() throws IOException {
FindOptions options = mock(FindOptions.class);
test.setOptions(options);
verify(expr).setOptions(options);
verifyNoMoreInteractions(expr);
}
// test the apply method is called and the result returned
@Test(timeout = 1000)
public void apply() throws IOException {
PathData item = mock(PathData.class);
when(expr.apply(item, -1)).thenReturn(Result.PASS).thenReturn(Result.FAIL);
assertEquals(Result.PASS, test.apply(item, -1));
assertEquals(Result.FAIL, test.apply(item, -1));
verify(expr, times(2)).apply(item, -1);
verifyNoMoreInteractions(expr);
}
// test that the finish method is called
@Test(timeout = 1000)
public void finish() throws IOException {
test.finish();
verify(expr).finish();
verifyNoMoreInteractions(expr);
}
// test that the getUsage method is called
@Test(timeout = 1000)
public void getUsage() {
String[] usage = new String[] { "Usage 1", "Usage 2", "Usage 3" };
when(expr.getUsage()).thenReturn(usage);
assertArrayEquals(usage, test.getUsage());
verify(expr).getUsage();
verifyNoMoreInteractions(expr);
}
// test that the getHelp method is called
@Test(timeout = 1000)
public void getHelp() {
String[] help = new String[] { "Help 1", "Help 2", "Help 3" };
when(expr.getHelp()).thenReturn(help);
assertArrayEquals(help, test.getHelp());
verify(expr).getHelp();
verifyNoMoreInteractions(expr);
}
// test that the isAction method is called
@Test(timeout = 1000)
public void isAction() {
when(expr.isAction()).thenReturn(true).thenReturn(false);
assertTrue(test.isAction());
assertFalse(test.isAction());
verify(expr, times(2)).isAction();
verifyNoMoreInteractions(expr);
}
// test that the isOperator method is called
@Test(timeout = 1000)
public void isOperator() {
    when(expr.isOperator()).thenReturn(true).thenReturn(false);
    assertTrue(test.isOperator());
    assertFalse(test.isOperator());
    verify(expr, times(2)).isOperator();
verifyNoMoreInteractions(expr);
}
// test that the getPrecedence method is called
@Test(timeout = 1000)
public void getPrecedence() {
int precedence = 12345;
when(expr.getPrecedence()).thenReturn(precedence);
assertEquals(precedence, test.getPrecedence());
verify(expr).getPrecedence();
verifyNoMoreInteractions(expr);
}
// test that the addChildren method is called
@Test(timeout = 1000)
public void addChildren() {
@SuppressWarnings("unchecked")
Deque<Expression> expressions = mock(Deque.class);
test.addChildren(expressions);
verify(expr).addChildren(expressions);
verifyNoMoreInteractions(expr);
}
// test that the addArguments method is called
@Test(timeout = 1000)
public void addArguments() {
@SuppressWarnings("unchecked")
Deque<String> args = mock(Deque.class);
test.addArguments(args);
verify(expr).addArguments(args);
verifyNoMoreInteractions(expr);
}
}
| 4,550 | 30.171233 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestName.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell.find;
import static org.junit.Assert.*;
import static org.apache.hadoop.fs.shell.find.TestHelper.*;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.shell.PathData;
import org.junit.Before;
import org.junit.Test;
public class TestName {
private FileSystem mockFs;
private Name name;
@Before
public void resetMock() throws IOException {
mockFs = MockFileSystem.setup();
}
private void setup(String arg) throws IOException {
name = new Name();
addArgument(name, arg);
name.setOptions(new FindOptions());
name.prepare();
}
// test a matching name
@Test(timeout = 1000)
public void applyMatch() throws IOException {
setup("name");
PathData item = new PathData("/directory/path/name", mockFs.getConf());
assertEquals(Result.PASS, name.apply(item, -1));
}
// test a non-matching name
@Test(timeout = 1000)
public void applyNotMatch() throws IOException {
setup("name");
PathData item = new PathData("/directory/path/notname", mockFs.getConf());
assertEquals(Result.FAIL, name.apply(item, -1));
}
// test a different case name
@Test(timeout = 1000)
public void applyMixedCase() throws IOException {
setup("name");
PathData item = new PathData("/directory/path/NaMe", mockFs.getConf());
assertEquals(Result.FAIL, name.apply(item, -1));
}
// test a matching glob pattern
@Test(timeout = 1000)
public void applyGlob() throws IOException {
setup("n*e");
PathData item = new PathData("/directory/path/name", mockFs.getConf());
assertEquals(Result.PASS, name.apply(item, -1));
}
// test a glob pattern with different case
@Test(timeout = 1000)
public void applyGlobMixedCase() throws IOException {
setup("n*e");
PathData item = new PathData("/directory/path/NaMe", mockFs.getConf());
assertEquals(Result.FAIL, name.apply(item, -1));
}
// test a non-matching glob pattern
@Test(timeout = 1000)
public void applyGlobNotMatch() throws IOException {
setup("n*e");
PathData item = new PathData("/directory/path/notmatch", mockFs.getConf());
assertEquals(Result.FAIL, name.apply(item, -1));
}
}
| 3,031 | 31.255319 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestIname.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell.find;
import static org.junit.Assert.*;
import static org.apache.hadoop.fs.shell.find.TestHelper.*;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.shell.PathData;
import org.junit.Before;
import org.junit.Test;
public class TestIname {
private FileSystem mockFs;
private Name.Iname name;
@Before
public void resetMock() throws IOException {
mockFs = MockFileSystem.setup();
}
private void setup(String arg) throws IOException {
name = new Name.Iname();
addArgument(name, arg);
name.setOptions(new FindOptions());
name.prepare();
}
// test a matching name (same case)
@Test(timeout = 1000)
public void applyMatch() throws IOException {
setup("name");
PathData item = new PathData("/directory/path/name", mockFs.getConf());
assertEquals(Result.PASS, name.apply(item, -1));
}
// test a non-matching name
@Test(timeout = 1000)
public void applyNotMatch() throws IOException {
setup("name");
PathData item = new PathData("/directory/path/notname", mockFs.getConf());
assertEquals(Result.FAIL, name.apply(item, -1));
}
// test a matching name (different case)
@Test(timeout = 1000)
public void applyMixedCase() throws IOException {
setup("name");
PathData item = new PathData("/directory/path/NaMe", mockFs.getConf());
assertEquals(Result.PASS, name.apply(item, -1));
}
// test a matching glob pattern (same case)
@Test(timeout = 1000)
public void applyGlob() throws IOException {
setup("n*e");
PathData item = new PathData("/directory/path/name", mockFs.getConf());
assertEquals(Result.PASS, name.apply(item, -1));
}
// test a matching glob pattern (different case)
@Test(timeout = 1000)
public void applyGlobMixedCase() throws IOException {
setup("n*e");
PathData item = new PathData("/directory/path/NaMe", mockFs.getConf());
assertEquals(Result.PASS, name.apply(item, -1));
}
// test a non-matching glob pattern
@Test(timeout = 1000)
public void applyGlobNotMatch() throws IOException {
setup("n*e");
PathData item = new PathData("/directory/path/notmatch", mockFs.getConf());
assertEquals(Result.FAIL, name.apply(item, -1));
}
}
| 3,085 | 31.829787 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestPrint.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell.find;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import java.io.IOException;
import org.apache.hadoop.fs.shell.PathData;
import org.junit.Test;
import java.io.PrintStream;
import org.apache.hadoop.fs.FileSystem;
import org.junit.Before;
public class TestPrint {
private FileSystem mockFs;
@Before
public void resetMock() throws IOException {
mockFs = MockFileSystem.setup();
}
// test the full path is printed to stdout
@Test(timeout = 1000)
public void testPrint() throws IOException {
Print print = new Print();
PrintStream out = mock(PrintStream.class);
FindOptions options = new FindOptions();
options.setOut(out);
print.setOptions(options);
String filename = "/one/two/test";
PathData item = new PathData(filename, mockFs.getConf());
assertEquals(Result.PASS, print.apply(item, -1));
verify(out).print(filename + '\n');
verifyNoMoreInteractions(out);
}
}
| 1,799 | 30.578947 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
import java.io.FileNotFoundException;
import java.io.IOException;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset;
/**
 * Test renaming files and directories.
*/
public abstract class AbstractContractRenameTest extends
AbstractFSContractTestBase {
@Test
public void testRenameNewFileSameDir() throws Throwable {
describe("rename a file into a new file in the same directory");
Path renameSrc = path("rename_src");
Path renameTarget = path("rename_dest");
byte[] data = dataset(256, 'a', 'z');
writeDataset(getFileSystem(), renameSrc,
data, data.length, 1024 * 1024, false);
boolean rename = rename(renameSrc, renameTarget);
assertTrue("rename("+renameSrc+", "+ renameTarget+") returned false",
rename);
ContractTestUtils.assertListStatusFinds(getFileSystem(),
renameTarget.getParent(), renameTarget);
ContractTestUtils.verifyFileContents(getFileSystem(), renameTarget, data);
}
@Test
public void testRenameNonexistentFile() throws Throwable {
describe("rename a file into a new file in the same directory");
Path missing = path("testRenameNonexistentFileSrc");
Path target = path("testRenameNonexistentFileDest");
boolean renameReturnsFalseOnFailure =
isSupported(ContractOptions.RENAME_RETURNS_FALSE_IF_SOURCE_MISSING);
mkdirs(missing.getParent());
try {
boolean renamed = rename(missing, target);
//expected an exception
if (!renameReturnsFalseOnFailure) {
String destDirLS = generateAndLogErrorListing(missing, target);
fail("expected rename(" + missing + ", " + target + " ) to fail," +
" got a result of " + renamed
+ " and a destination directory of " + destDirLS);
} else {
// at least one FS only returns false here, if that is the case
// warn but continue
getLog().warn("Rename returned {} renaming a nonexistent file", renamed);
assertFalse("Renaming a missing file returned true", renamed);
}
} catch (FileNotFoundException e) {
if (renameReturnsFalseOnFailure) {
ContractTestUtils.fail(
"Renaming a missing file unexpectedly threw an exception", e);
}
handleExpectedException(e);
} catch (IOException e) {
handleRelaxedException("rename nonexistent file",
"FileNotFoundException",
e);
}
assertPathDoesNotExist("rename nonexistent file created a destination file", target);
}
/**
* Rename test -handles filesystems that will overwrite the destination
* as well as those that do not (i.e. HDFS).
* @throws Throwable
*/
@Test
public void testRenameFileOverExistingFile() throws Throwable {
describe("Verify renaming a file onto an existing file matches expectations");
Path srcFile = path("source-256.txt");
byte[] srcData = dataset(256, 'a', 'z');
writeDataset(getFileSystem(), srcFile, srcData, srcData.length, 1024, false);
Path destFile = path("dest-512.txt");
byte[] destData = dataset(512, 'A', 'Z');
writeDataset(getFileSystem(), destFile, destData, destData.length, 1024, false);
assertIsFile(destFile);
boolean renameOverwritesDest = isSupported(RENAME_OVERWRITES_DEST);
    boolean renameReturnsFalseOnRenameDestExists =
        isSupported(RENAME_RETURNS_FALSE_IF_DEST_EXISTS);
boolean destUnchanged = true;
try {
boolean renamed = rename(srcFile, destFile);
if (renameOverwritesDest) {
// the filesystem supports rename(file, file2) by overwriting file2
assertTrue("Rename returned false", renamed);
destUnchanged = false;
} else {
// rename is rejected by returning 'false' or throwing an exception
        if (renamed && renameReturnsFalseOnRenameDestExists) {
          // expected rename() to return false, not true
String destDirLS = generateAndLogErrorListing(srcFile, destFile);
getLog().error("dest dir {}", destDirLS);
fail("expected rename(" + srcFile + ", " + destFile + " ) to fail," +
" but got success and destination of " + destDirLS);
}
}
} catch (FileAlreadyExistsException e) {
handleExpectedException(e);
}
// verify that the destination file is as expected based on the expected
// outcome
ContractTestUtils.verifyFileContents(getFileSystem(), destFile,
        destUnchanged ? destData : srcData);
}
@Test
public void testRenameDirIntoExistingDir() throws Throwable {
describe("Verify renaming a dir into an existing dir puts it underneath"
+" and leaves existing files alone");
FileSystem fs = getFileSystem();
String sourceSubdir = "source";
Path srcDir = path(sourceSubdir);
Path srcFilePath = new Path(srcDir, "source-256.txt");
byte[] srcDataset = dataset(256, 'a', 'z');
writeDataset(fs, srcFilePath, srcDataset, srcDataset.length, 1024, false);
Path destDir = path("dest");
Path destFilePath = new Path(destDir, "dest-512.txt");
    byte[] destDataset = dataset(512, 'A', 'Z');
    writeDataset(fs, destFilePath, destDataset, destDataset.length, 1024, false);
assertIsFile(destFilePath);
boolean rename = rename(srcDir, destDir);
Path renamedSrc = new Path(destDir, sourceSubdir);
assertIsFile(destFilePath);
assertIsDirectory(renamedSrc);
    ContractTestUtils.verifyFileContents(fs, destFilePath, destDataset);
assertTrue("rename returned false though the contents were copied", rename);
}
@Test
public void testRenameFileNonexistentDir() throws Throwable {
describe("rename a file into a new file in the same directory");
Path renameSrc = path("testRenameSrc");
Path renameTarget = path("subdir/testRenameTarget");
byte[] data = dataset(256, 'a', 'z');
writeDataset(getFileSystem(), renameSrc, data, data.length, 1024 * 1024,
false);
boolean renameCreatesDestDirs = isSupported(RENAME_CREATES_DEST_DIRS);
try {
boolean rename = rename(renameSrc, renameTarget);
if (renameCreatesDestDirs) {
assertTrue(rename);
ContractTestUtils.verifyFileContents(getFileSystem(), renameTarget, data);
} else {
assertFalse(rename);
ContractTestUtils.verifyFileContents(getFileSystem(), renameSrc, data);
}
} catch (FileNotFoundException e) {
// allowed unless that rename flag is set
assertFalse(renameCreatesDestDirs);
}
}
@Test
public void testRenameWithNonEmptySubDir() throws Throwable {
final Path renameTestDir = path("testRenameWithNonEmptySubDir");
final Path srcDir = new Path(renameTestDir, "src1");
final Path srcSubDir = new Path(srcDir, "sub");
final Path finalDir = new Path(renameTestDir, "dest");
FileSystem fs = getFileSystem();
boolean renameRemoveEmptyDest = isSupported(RENAME_REMOVE_DEST_IF_EMPTY_DIR);
ContractTestUtils.rm(fs, renameTestDir, true, false);
fs.mkdirs(srcDir);
fs.mkdirs(finalDir);
ContractTestUtils.writeTextFile(fs, new Path(srcDir, "source.txt"),
"this is the file in src dir", false);
ContractTestUtils.writeTextFile(fs, new Path(srcSubDir, "subfile.txt"),
"this is the file in src/sub dir", false);
ContractTestUtils.assertPathExists(fs, "not created in src dir",
new Path(srcDir, "source.txt"));
ContractTestUtils.assertPathExists(fs, "not created in src/sub dir",
new Path(srcSubDir, "subfile.txt"));
fs.rename(srcDir, finalDir);
// Accept both POSIX rename behavior and CLI rename behavior
if (renameRemoveEmptyDest) {
// POSIX rename behavior
ContractTestUtils.assertPathExists(fs, "not renamed into dest dir",
new Path(finalDir, "source.txt"));
ContractTestUtils.assertPathExists(fs, "not renamed into dest/sub dir",
new Path(finalDir, "sub/subfile.txt"));
} else {
// CLI rename behavior
ContractTestUtils.assertPathExists(fs, "not renamed into dest dir",
new Path(finalDir, "src1/source.txt"));
ContractTestUtils.assertPathExists(fs, "not renamed into dest/sub dir",
new Path(finalDir, "src1/sub/subfile.txt"));
}
ContractTestUtils.assertPathDoesNotExist(fs, "not deleted",
new Path(srcDir, "source.txt"));
}
}
| 9,434 | 40.563877 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
/**
* Test append -if supported
*/
public abstract class AbstractContractAppendTest extends AbstractFSContractTestBase {
private static final Logger LOG =
LoggerFactory.getLogger(AbstractContractAppendTest.class);
private Path testPath;
private Path target;
@Override
public void setup() throws Exception {
super.setup();
skipIfUnsupported(SUPPORTS_APPEND);
//delete the test directory
testPath = path("test");
target = new Path(testPath, "target");
}
@Test
public void testAppendToEmptyFile() throws Throwable {
touch(getFileSystem(), target);
byte[] dataset = dataset(256, 'a', 'z');
FSDataOutputStream outputStream = getFileSystem().append(target);
try {
outputStream.write(dataset);
} finally {
outputStream.close();
}
byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), target,
dataset.length);
ContractTestUtils.compareByteArrays(dataset, bytes, dataset.length);
}
@Test
public void testAppendNonexistentFile() throws Throwable {
try {
FSDataOutputStream out = getFileSystem().append(target);
//got here: trouble
out.close();
fail("expected a failure");
} catch (Exception e) {
//expected
handleExpectedException(e);
}
}
@Test
public void testAppendToExistingFile() throws Throwable {
byte[] original = dataset(8192, 'A', 'Z');
byte[] appended = dataset(8192, '0', '9');
createFile(getFileSystem(), target, false, original);
FSDataOutputStream outputStream = getFileSystem().append(target);
outputStream.write(appended);
outputStream.close();
byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), target,
original.length + appended.length);
ContractTestUtils.validateFileContent(bytes,
        new byte[][] { original, appended });
}
@Test
public void testAppendMissingTarget() throws Throwable {
try {
FSDataOutputStream out = getFileSystem().append(target);
//got here: trouble
out.close();
fail("expected a failure");
} catch (Exception e) {
//expected
handleExpectedException(e);
}
}
@Test
public void testRenameFileBeingAppended() throws Throwable {
touch(getFileSystem(), target);
assertPathExists("original file does not exist", target);
byte[] dataset = dataset(256, 'a', 'z');
FSDataOutputStream outputStream = getFileSystem().append(target);
outputStream.write(dataset);
Path renamed = new Path(testPath, "renamed");
rename(target, renamed);
outputStream.close();
String listing = ls(testPath);
//expected: the stream goes to the file that was being renamed, not
//the original path
assertPathExists("renamed destination file does not exist", renamed);
assertPathDoesNotExist("Source file found after rename during append:\n" +
listing, target);
byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), renamed,
dataset.length);
ContractTestUtils.compareByteArrays(dataset, bytes, dataset.length);
}
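  // The expectation encoded above -bytes written after the rename end up in
  // the renamed file- matches HDFS semantics, where an open output stream
  // follows the file rather than the path it was opened under.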
}
| 4,474 | 33.689922 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;
import java.io.FileNotFoundException;
import java.io.IOException;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
import static org.apache.hadoop.fs.contract.ContractTestUtils.rm;
import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
/**
 * Test open operations.
*/
public abstract class AbstractContractOpenTest extends AbstractFSContractTestBase {
private FSDataInputStream instream;
@Override
protected Configuration createConfiguration() {
Configuration conf = super.createConfiguration();
conf.setInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, 4096);
return conf;
}
@Override
public void teardown() throws Exception {
IOUtils.closeStream(instream);
instream = null;
super.teardown();
}
@Test
public void testOpenReadZeroByteFile() throws Throwable {
describe("create & read a 0 byte file");
Path path = path("zero.txt");
touch(getFileSystem(), path);
instream = getFileSystem().open(path);
assertEquals(0, instream.getPos());
    //expect the initial read to return -1 (EOF)
int result = instream.read();
assertMinusOne("initial byte read", result);
}
@Test
public void testFsIsEncrypted() throws Exception {
describe("create an empty file and call FileStatus.isEncrypted()");
final Path path = path("file");
createFile(getFileSystem(), path, false, new byte[0]);
final FileStatus stat = getFileSystem().getFileStatus(path);
assertFalse("Expecting false for stat.isEncrypted()",
stat.isEncrypted());
}
@Test
public void testOpenReadDir() throws Throwable {
describe("create & read a directory");
Path path = path("zero.dir");
mkdirs(path);
try {
instream = getFileSystem().open(path);
//at this point we've opened a directory
fail("A directory has been opened for reading");
} catch (FileNotFoundException e) {
handleExpectedException(e);
} catch (IOException e) {
handleRelaxedException("opening a directory for reading",
"FileNotFoundException",
e);
}
}
@Test
public void testOpenReadDirWithChild() throws Throwable {
describe("create & read a directory which has a child");
Path path = path("zero.dir");
mkdirs(path);
Path path2 = new Path(path, "child");
mkdirs(path2);
try {
instream = getFileSystem().open(path);
//at this point we've opened a directory
fail("A directory has been opened for reading");
} catch (FileNotFoundException e) {
handleExpectedException(e);
} catch (IOException e) {
handleRelaxedException("opening a directory for reading",
"FileNotFoundException",
e);
}
}
@Test
public void testOpenFileTwice() throws Throwable {
describe("verify that two opened file streams are independent");
Path path = path("testopenfiletwice.txt");
byte[] block = dataset(TEST_FILE_LEN, 0, 255);
//this file now has a simple rule: offset => value
createFile(getFileSystem(), path, false, block);
//open first
FSDataInputStream instream1 = getFileSystem().open(path);
int c = instream1.read();
    assertEquals(0, c);
FSDataInputStream instream2 = null;
try {
instream2 = getFileSystem().open(path);
assertEquals("first read of instream 2", 0, instream2.read());
assertEquals("second read of instream 1", 1, instream1.read());
instream1.close();
assertEquals("second read of instream 2", 1, instream2.read());
//close instream1 again
instream1.close();
} finally {
IOUtils.closeStream(instream1);
IOUtils.closeStream(instream2);
}
}
@Test
public void testSequentialRead() throws Throwable {
describe("verify that sequential read() operations return values");
Path path = path("testsequentialread.txt");
int len = 4;
int base = 0x40; // 64
byte[] block = dataset(len, base, base + len);
//this file now has a simple rule: offset => (value | 0x40)
createFile(getFileSystem(), path, false, block);
//open first
instream = getFileSystem().open(path);
assertEquals(base, instream.read());
assertEquals(base + 1, instream.read());
assertEquals(base + 2, instream.read());
assertEquals(base + 3, instream.read());
    // and now EOF: further reads return -1
assertEquals(-1, instream.read());
assertEquals(-1, instream.read());
instream.close();
}
}
| 5,750 | 33.232143 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Assert;
import org.junit.internal.AssumptionViolatedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.Properties;
import java.util.UUID;
/**
* Utilities used across test cases
*/
public class ContractTestUtils extends Assert {
private static final Logger LOG =
LoggerFactory.getLogger(ContractTestUtils.class);
public static final String IO_FILE_BUFFER_SIZE = "io.file.buffer.size";
// For scale testing, we can repeatedly write small chunk data to generate
// a large file.
public static final String IO_CHUNK_BUFFER_SIZE = "io.chunk.buffer.size";
public static final int DEFAULT_IO_CHUNK_BUFFER_SIZE = 128;
public static final String IO_CHUNK_MODULUS_SIZE = "io.chunk.modulus.size";
public static final int DEFAULT_IO_CHUNK_MODULUS_SIZE = 128;
/**
* Assert that a property in the property set matches the expected value
* @param props property set
* @param key property name
* @param expected expected value. If null, the property must not be in the set
*/
public static void assertPropertyEquals(Properties props,
String key,
String expected) {
String val = props.getProperty(key);
if (expected == null) {
assertNull("Non null property " + key + " = " + val, val);
} else {
assertEquals("property " + key + " = " + val,
expected,
val);
}
}
/**
*
* Write a file and read it in, validating the result. Optional flags control
* whether file overwrite operations should be enabled, and whether the
* file should be deleted afterwards.
*
* If there is a mismatch between what was written and what was expected,
   * a small range of bytes either side of the first error is logged to aid
   * diagnosing what problem occurred -whether it was a previous file
   * or a corruption of the current file. This assumes that two
* sequential runs to the same path use datasets with different character
* moduli.
*
   * @param fs filesystem
   * @param path path to write to
   * @param src source dataset
   * @param len length of data
   * @param blocksize block size for the write
   * @param overwrite should the create option allow overwrites?
* @param delete should the file be deleted afterwards? -with a verification
* that it worked. Deletion is not attempted if an assertion has failed
* earlier -it is not in a <code>finally{}</code> block.
* @throws IOException IO problems
*/
public static void writeAndRead(FileSystem fs,
Path path,
byte[] src,
int len,
int blocksize,
boolean overwrite,
boolean delete) throws IOException {
fs.mkdirs(path.getParent());
writeDataset(fs, path, src, len, blocksize, overwrite);
byte[] dest = readDataset(fs, path, len);
compareByteArrays(src, dest, len);
if (delete) {
rejectRootOperation(path);
boolean deleted = fs.delete(path, false);
assertTrue("Deleted", deleted);
assertPathDoesNotExist(fs, "Cleanup failed", path);
}
}
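  // Illustrative usage (a sketch; the fs handle and path are assumptions):
  //
  //   byte[] src = dataset(1024, 'a', 'z');
  //   writeAndRead(fs, new Path("/test/roundtrip.bin"),
  //       src, src.length, 4096, true, true);
  //
  // Using different character moduli on successive runs makes stale data
  // from an earlier run show up as a byte mismatch.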
/**
* Write a file.
* Optional flags control
* whether file overwrite operations should be enabled
   * @param fs filesystem
   * @param path path to write to
   * @param src source dataset
   * @param len length of data
   * @param blocksize block size for the write
   * @param overwrite should the create option allow overwrites?
   * @throws IOException IO problems
*/
public static void writeDataset(FileSystem fs,
Path path,
byte[] src,
int len,
                                   int blocksize,
boolean overwrite) throws IOException {
assertTrue(
"Not enough data in source array to write " + len + " bytes",
src.length >= len);
FSDataOutputStream out = fs.create(path,
overwrite,
fs.getConf()
.getInt(IO_FILE_BUFFER_SIZE,
4096),
(short) 1,
                                        blocksize);
out.write(src, 0, len);
out.close();
assertFileHasLength(fs, path, len);
}
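  // Note: in the create() call above, (short) 1 is the replication factor
  // -kept at one so writes succeed on single-node test clusters- and the
  // final argument is the block size.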
/**
* Read the file and convert to a byte dataset.
* This implements readfully internally, so that it will read
* in the file without ever having to seek()
* @param fs filesystem
* @param path path to read from
* @param len length of data to read
* @return the bytes
* @throws IOException IO problems
*/
public static byte[] readDataset(FileSystem fs, Path path, int len)
throws IOException {
FSDataInputStream in = fs.open(path);
byte[] dest = new byte[len];
    int offset = 0;
int nread = 0;
try {
while (nread < len) {
int nbytes = in.read(dest, offset + nread, len - nread);
if (nbytes < 0) {
throw new EOFException("End of file reached before reading fully.");
}
nread += nbytes;
}
} finally {
in.close();
}
return dest;
}
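  // The loop above exists because a single read() may legitimately return
  // fewer bytes than requested; only a negative return value signals
  // end-of-stream, which is surfaced here as an EOFException.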
/**
* Read a file, verify its length and contents match the expected array
* @param fs filesystem
* @param path path to file
* @param original original dataset
* @throws IOException IO Problems
*/
public static void verifyFileContents(FileSystem fs,
Path path,
byte[] original) throws IOException {
FileStatus stat = fs.getFileStatus(path);
String statText = stat.toString();
assertTrue("not a file " + statText, stat.isFile());
assertEquals("wrong length " + statText, original.length, stat.getLen());
byte[] bytes = readDataset(fs, path, original.length);
    compareByteArrays(original, bytes, original.length);
}
/**
* Verify that the read at a specific offset in a stream
* matches that expected
* @param stm stream
* @param fileContents original file contents
* @param seekOff seek offset
* @param toRead number of bytes to read
* @throws IOException IO problems
*/
public static void verifyRead(FSDataInputStream stm, byte[] fileContents,
int seekOff, int toRead) throws IOException {
byte[] out = new byte[toRead];
stm.seek(seekOff);
stm.readFully(out);
byte[] expected = Arrays.copyOfRange(fileContents, seekOff,
seekOff + toRead);
    compareByteArrays(expected, out, toRead);
}
/**
   * Assert that the arrays original[0..len) and received[] are equal.
* A failure triggers the logging of the bytes near where the first
* difference surfaces.
* @param original source data
* @param received actual
* @param len length of bytes to compare
*/
public static void compareByteArrays(byte[] original,
byte[] received,
int len) {
assertEquals("Number of bytes read != number written",
len, received.length);
int errors = 0;
    int firstErrorByte = -1;
for (int i = 0; i < len; i++) {
if (original[i] != received[i]) {
if (errors == 0) {
          firstErrorByte = i;
}
errors++;
}
}
if (errors > 0) {
String message = String.format(" %d errors in file of length %d",
errors, len);
LOG.warn(message);
// the range either side of the first error to print
// this is a purely arbitrary number, to aid user debugging
final int overlap = 10;
      for (int i = Math.max(0, firstErrorByte - overlap);
           i < Math.min(firstErrorByte + overlap, len);
i++) {
byte actual = received[i];
byte expected = original[i];
String letter = toChar(actual);
String line = String.format("[%04d] %2x %s\n", i, actual, letter);
if (expected != actual) {
line = String.format("[%04d] %2x %s -expected %2x %s\n",
i,
actual,
letter,
expected,
toChar(expected));
}
LOG.warn(line);
}
fail(message);
}
}
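  // On a mismatch the ten bytes either side of the first differing offset
  // are logged, producing lines such as (per the format strings above):
  //   [0012] 61 a -expected 62 b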
/**
* Convert a byte to a character for printing. If the
* byte value is < 32 -and hence unprintable- the byte is
* returned as a two digit hex value
* @param b byte
* @return the printable character string
*/
public static String toChar(byte b) {
if (b >= 0x20) {
return Character.toString((char) b);
} else {
return String.format("%02x", b);
}
}
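  // For example toChar((byte) 0x41) returns "A", while the unprintable
  // toChar((byte) 0x07) comes back as its hex form "07".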
/**
* Convert a buffer to a string, character by character
* @param buffer input bytes
* @return a string conversion
*/
public static String toChar(byte[] buffer) {
StringBuilder builder = new StringBuilder(buffer.length);
for (byte b : buffer) {
builder.append(toChar(b));
}
return builder.toString();
}
public static byte[] toAsciiByteArray(String s) {
char[] chars = s.toCharArray();
int len = chars.length;
byte[] buffer = new byte[len];
for (int i = 0; i < len; i++) {
buffer[i] = (byte) (chars[i] & 0xff);
}
return buffer;
}
/**
* Cleanup at the end of a test run
* @param action action triggering the operation (for use in logging)
* @param fileSystem filesystem to work with. May be null
* @param cleanupPath path to delete as a string
*/
public static void cleanup(String action,
FileSystem fileSystem,
String cleanupPath) {
if (fileSystem == null) {
return;
}
Path path = new Path(cleanupPath).makeQualified(fileSystem.getUri(),
fileSystem.getWorkingDirectory());
cleanup(action, fileSystem, path);
}
/**
* Cleanup at the end of a test run
* @param action action triggering the operation (for use in logging)
* @param fileSystem filesystem to work with. May be null
* @param path path to delete
*/
public static void cleanup(String action, FileSystem fileSystem, Path path) {
noteAction(action);
try {
rm(fileSystem, path, true, false);
} catch (Exception e) {
LOG.error("Error deleting in "+ action + " - " + path + ": " + e, e);
}
}
/**
* Delete a directory. There's a safety check for operations against the
* root directory -these are intercepted and rejected with an IOException
* unless the allowRootDelete flag is true
* @param fileSystem filesystem to work with. May be null
* @param path path to delete
* @param recursive flag to enable recursive delete
* @param allowRootDelete can the root directory be deleted?
* @throws IOException on any problem.
*/
public static boolean rm(FileSystem fileSystem,
Path path,
boolean recursive,
boolean allowRootDelete) throws
IOException {
if (fileSystem != null) {
rejectRootOperation(path, allowRootDelete);
if (fileSystem.exists(path)) {
return fileSystem.delete(path, recursive);
}
}
return false;
}
/**
* Block any operation on the root path. This is a safety check
* @param path path in the filesystem
* @param allowRootOperation can the root directory be manipulated?
* @throws IOException if the operation was rejected
*/
public static void rejectRootOperation(Path path,
boolean allowRootOperation) throws IOException {
if (path.isRoot() && !allowRootOperation) {
throw new IOException("Root directory operation rejected: " + path);
}
}
/**
* Block any operation on the root path. This is a safety check
* @param path path in the filesystem
* @throws IOException if the operation was rejected
*/
public static void rejectRootOperation(Path path) throws IOException {
rejectRootOperation(path, false);
}
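  // Example: rejectRootOperation(new Path("/")) throws an IOException,
  // while rejectRootOperation(new Path("/"), true) lets the operation
  // proceed -tests that really do manipulate the root must opt in.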
public static void noteAction(String action) {
if (LOG.isDebugEnabled()) {
LOG.debug("============== "+ action +" =============");
}
}
/**
* downgrade a failure to a message and a warning, then an
* exception for the Junit test runner to mark as failed
* @param message text message
* @param failure what failed
* @throws AssumptionViolatedException always
*/
public static void downgrade(String message, Throwable failure) {
LOG.warn("Downgrading test " + message, failure);
AssumptionViolatedException ave =
new AssumptionViolatedException(failure, null);
throw ave;
}
/**
* report an overridden test as unsupported
* @param message message to use in the text
* @throws AssumptionViolatedException always
*/
public static void unsupported(String message) {
skip(message);
}
/**
* report a test has been skipped for some reason
* @param message message to use in the text
* @throws AssumptionViolatedException always
*/
public static void skip(String message) {
LOG.info("Skipping: {}", message);
throw new AssumptionViolatedException(message);
}
/**
* Fail with an exception that was received
* @param text text to use in the exception
* @param thrown a (possibly null) throwable to init the cause with
* @throws AssertionError with the text and throwable -always
*/
public static void fail(String text, Throwable thrown) {
AssertionError e = new AssertionError(text);
e.initCause(thrown);
throw e;
}
/**
* Make an assertion about the length of a file
* @param fs filesystem
* @param path path of the file
* @param expected expected length
* @throws IOException on File IO problems
*/
public static void assertFileHasLength(FileSystem fs, Path path,
int expected) throws IOException {
FileStatus status = fs.getFileStatus(path);
assertEquals(
"Wrong file length of file " + path + " status: " + status,
expected,
status.getLen());
}
/**
* Assert that a path refers to a directory
* @param fs filesystem
* @param path path of the directory
* @throws IOException on File IO problems
*/
public static void assertIsDirectory(FileSystem fs,
Path path) throws IOException {
FileStatus fileStatus = fs.getFileStatus(path);
assertIsDirectory(fileStatus);
}
/**
* Assert that a path refers to a directory
* @param fileStatus stats to check
*/
public static void assertIsDirectory(FileStatus fileStatus) {
assertTrue("Should be a directory -but isn't: " + fileStatus,
fileStatus.isDirectory());
}
/**
* Write the text to a file, returning the converted byte array
* for use in validating the round trip
* @param fs filesystem
* @param path path of file
* @param text text to write
* @param overwrite should the operation overwrite any existing file?
* @return the read bytes
* @throws IOException on IO problems
*/
public static byte[] writeTextFile(FileSystem fs,
Path path,
String text,
boolean overwrite) throws IOException {
byte[] bytes = new byte[0];
if (text != null) {
bytes = toAsciiByteArray(text);
}
createFile(fs, path, overwrite, bytes);
return bytes;
}
/**
* Create a file
* @param fs filesystem
* @param path path to write
* @param overwrite overwrite flag
* @param data source dataset. Can be null
* @throws IOException on any problem
*/
public static void createFile(FileSystem fs,
Path path,
boolean overwrite,
byte[] data) throws IOException {
FSDataOutputStream stream = fs.create(path, overwrite);
if (data != null && data.length > 0) {
stream.write(data);
}
stream.close();
}
/**
* Touch a file
* @param fs filesystem
* @param path path
* @throws IOException IO problems
*/
public static void touch(FileSystem fs,
Path path) throws IOException {
createFile(fs, path, true, null);
}
/**
* Delete a file/dir and assert that delete() returned true
* <i>and</i> that the path no longer exists. This variant rejects
* all operations on root directories
* @param fs filesystem
* @param file path to delete
* @param recursive flag to enable recursive delete
* @throws IOException IO problems
*/
public static void assertDeleted(FileSystem fs,
Path file,
boolean recursive) throws IOException {
assertDeleted(fs, file, recursive, false);
}
/**
* Delete a file/dir and assert that delete() returned true
   * <i>and</i> that the path no longer exists. This variant rejects
   * operations on root directories unless allowRootOperations is set
* @param fs filesystem
* @param file path to delete
* @param recursive flag to enable recursive delete
* @param allowRootOperations can the root dir be deleted?
* @throws IOException IO problems
*/
public static void assertDeleted(FileSystem fs,
Path file,
boolean recursive,
boolean allowRootOperations) throws IOException {
rejectRootOperation(file, allowRootOperations);
assertPathExists(fs, "about to be deleted file", file);
boolean deleted = fs.delete(file, recursive);
String dir = ls(fs, file.getParent());
assertTrue("Delete failed on " + file + ": " + dir, deleted);
assertPathDoesNotExist(fs, "Deleted file", file);
}
/**
* Read in "length" bytes, convert to an ascii string
* @param fs filesystem
* @param path path to read
* @param length #of bytes to read.
* @return the bytes read and converted to a string
* @throws IOException IO problems
*/
public static String readBytesToString(FileSystem fs,
Path path,
int length) throws IOException {
FSDataInputStream in = fs.open(path);
try {
byte[] buf = new byte[length];
in.readFully(0, buf);
return toChar(buf);
} finally {
in.close();
}
}
/**
   * Take an array of filestats and convert to a string, each entry prefixed
   * with a two-digit counter such as [01].
* @param stats array of stats
* @param separator separator after every entry
* @return a stringified set
*/
public static String fileStatsToString(FileStatus[] stats, String separator) {
StringBuilder buf = new StringBuilder(stats.length * 128);
for (int i = 0; i < stats.length; i++) {
buf.append(String.format("[%02d] %s", i, stats[i])).append(separator);
}
return buf.toString();
}
/**
* List a directory
* @param fileSystem FS
* @param path path
* @return a directory listing or failure message
   * @throws IOException IO problems
*/
public static String ls(FileSystem fileSystem, Path path) throws IOException {
if (path == null) {
//surfaces when someone calls getParent() on something at the top of the path
return "/";
}
FileStatus[] stats;
String pathtext = "ls " + path;
try {
stats = fileSystem.listStatus(path);
} catch (FileNotFoundException e) {
return pathtext + " -file not found";
} catch (IOException e) {
return pathtext + " -failed: " + e;
}
return dumpStats(pathtext, stats);
}
public static String dumpStats(String pathname, FileStatus[] stats) {
return pathname + fileStatsToString(stats, "\n");
}
/**
   * Assert that a file exists and its {@link FileStatus} entry
* declares that this is a file and not a symlink or directory.
* @param fileSystem filesystem to resolve path against
* @param filename name of the file
* @throws IOException IO problems during file operations
*/
public static void assertIsFile(FileSystem fileSystem, Path filename) throws
IOException {
assertPathExists(fileSystem, "Expected file", filename);
FileStatus status = fileSystem.getFileStatus(filename);
assertIsFile(filename, status);
}
/**
   * Assert that a file exists and its {@link FileStatus} entry
* declares that this is a file and not a symlink or directory.
* @param filename name of the file
* @param status file status
*/
public static void assertIsFile(Path filename, FileStatus status) {
String fileInfo = filename + " " + status;
assertFalse("File claims to be a directory " + fileInfo,
status.isDirectory());
assertFalse("File claims to be a symlink " + fileInfo,
status.isSymlink());
}
/**
* Create a dataset for use in the tests; all data is in the range
* base to (base+modulo-1) inclusive
* @param len length of data
* @param base base of the data
* @param modulo the modulo
* @return the newly generated dataset
*/
public static byte[] dataset(int len, int base, int modulo) {
byte[] dataset = new byte[len];
for (int i = 0; i < len; i++) {
dataset[i] = (byte) (base + (i % modulo));
}
return dataset;
}
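  // Illustrative check (a sketch, not part of the original class): each byte
  // is base + (i % modulo), so dataset(6, 'a', 3) cycles 'a','b','c','a','b','c'.
  public static void demoDatasetPattern() {
    byte[] d = dataset(6, 'a', 3);
    assertEquals('a', d[0]);
    assertEquals('b', d[1]);
    assertEquals('c', d[2]);
    assertEquals('a', d[3]);
  }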
/**
* Assert that a path exists -but make no assertions as to the
* type of that entry
*
* @param fileSystem filesystem to examine
* @param message message to include in the assertion failure message
* @param path path in the filesystem
* @throws FileNotFoundException raised if the path is missing
* @throws IOException IO problems
*/
public static void assertPathExists(FileSystem fileSystem, String message,
Path path) throws IOException {
if (!fileSystem.exists(path)) {
//failure, report it
ls(fileSystem, path.getParent());
throw new FileNotFoundException(message + ": not found " + path
+ " in " + path.getParent());
}
}
/**
* Assert that a path does not exist
*
* @param fileSystem filesystem to examine
* @param message message to include in the assertion failure message
* @param path path in the filesystem
* @throws IOException IO problems
*/
public static void assertPathDoesNotExist(FileSystem fileSystem,
String message,
Path path) throws IOException {
try {
FileStatus status = fileSystem.getFileStatus(path);
fail(message + ": unexpectedly found " + path + " as " + status);
} catch (FileNotFoundException expected) {
//this is expected
}
}
/**
* Assert that a FileSystem.listStatus on a dir finds the subdir/child entry
* @param fs filesystem
* @param dir directory to scan
* @param subdir full path to look for
   * @throws IOException IO problems
*/
public static void assertListStatusFinds(FileSystem fs,
Path dir,
Path subdir) throws IOException {
FileStatus[] stats = fs.listStatus(dir);
boolean found = false;
StringBuilder builder = new StringBuilder();
for (FileStatus stat : stats) {
builder.append(stat.toString()).append('\n');
if (stat.getPath().equals(subdir)) {
found = true;
}
}
assertTrue("Path " + subdir
+ " not found in directory " + dir + ":" + builder,
found);
}
/**
* Test for the host being an OSX machine
* @return true if the JVM thinks that is running on OSX
*/
public static boolean isOSX() {
return System.getProperty("os.name").contains("OS X");
}
/**
   * Compare the content of a concatenated file against the original
   * source arrays, byte for byte.
   * @param concat bytes read back from the concatenated file
   * @param bytes the original data, as an array of byte arrays
*/
public static void validateFileContent(byte[] concat, byte[][] bytes) {
int idx = 0;
boolean mismatch = false;
for (byte[] bb : bytes) {
for (byte b : bb) {
if (b != concat[idx++]) {
mismatch = true;
break;
}
}
if (mismatch)
break;
}
assertFalse("File content of file is not as expected at offset " + idx,
mismatch);
}
/**
* Receives test data from the given input file and checks the size of the
* data as well as the pattern inside the received data.
*
* @param fs FileSystem
* @param path Input file to be checked
* @param expectedSize the expected size of the data to be read from the
* input file in bytes
* @param bufferLen Pattern length
* @param modulus Pattern modulus
* @throws IOException
* thrown if an error occurs while reading the data
*/
public static void verifyReceivedData(FileSystem fs, Path path,
final long expectedSize,
final int bufferLen,
final int modulus) throws IOException {
final byte[] testBuffer = new byte[bufferLen];
long totalBytesRead = 0;
int nextExpectedNumber = 0;
final InputStream inputStream = fs.open(path);
try {
while (true) {
final int bytesRead = inputStream.read(testBuffer);
if (bytesRead < 0) {
break;
}
totalBytesRead += bytesRead;
for (int i = 0; i < bytesRead; ++i) {
if (testBuffer[i] != nextExpectedNumber) {
throw new IOException("Read number " + testBuffer[i]
+ " but expected " + nextExpectedNumber);
}
++nextExpectedNumber;
if (nextExpectedNumber == modulus) {
nextExpectedNumber = 0;
}
}
}
if (totalBytesRead != expectedSize) {
throw new IOException("Expected to read " + expectedSize +
" bytes but only received " + totalBytesRead);
}
} finally {
inputStream.close();
}
}
/**
* Generates test data of the given size according to some specific pattern
* and writes it to the provided output file.
*
* @param fs FileSystem
* @param path Test file to be generated
* @param size The size of the test data to be generated in bytes
* @param bufferLen Pattern length
* @param modulus Pattern modulus
* @throws IOException
* thrown if an error occurs while writing the data
*/
public static long generateTestFile(FileSystem fs, Path path,
final long size,
final int bufferLen,
final int modulus) throws IOException {
final byte[] testBuffer = new byte[bufferLen];
for (int i = 0; i < testBuffer.length; ++i) {
testBuffer[i] = (byte) (i % modulus);
}
final OutputStream outputStream = fs.create(path, false);
long bytesWritten = 0;
try {
while (bytesWritten < size) {
final long diff = size - bytesWritten;
if (diff < testBuffer.length) {
outputStream.write(testBuffer, 0, (int) diff);
bytesWritten += diff;
} else {
outputStream.write(testBuffer);
bytesWritten += testBuffer.length;
}
}
return bytesWritten;
} finally {
outputStream.close();
}
}
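  // Usage sketch (illustrative; the 4 KB size, 1 KB buffer and modulus of 128
  // are arbitrary example values): write a patterned file, then verify it
  // byte for byte with the matching reader.
  public static void demoGenerateAndVerify(FileSystem fs, Path path)
      throws IOException {
    long written = generateTestFile(fs, path, 4096, 1024, 128);
    verifyReceivedData(fs, path, written, 1024, 128);
  }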
/**
* Creates and reads a file with the given size. The test file is generated
* according to a specific pattern so it can be easily verified even if it's
* a multi-GB one.
* During the read phase the incoming data stream is also checked against
* this pattern.
*
* @param fs FileSystem
   * @param parent Test file parent dir path
   * @param fileSize size of the test file to create and verify, in bytes
* @throws IOException
* thrown if an I/O error occurs while writing or reading the test file
*/
public static void createAndVerifyFile(FileSystem fs, Path parent, final long fileSize)
throws IOException {
int testBufferSize = fs.getConf()
.getInt(IO_CHUNK_BUFFER_SIZE, DEFAULT_IO_CHUNK_BUFFER_SIZE);
int modulus = fs.getConf()
.getInt(IO_CHUNK_MODULUS_SIZE, DEFAULT_IO_CHUNK_MODULUS_SIZE);
final String objectName = UUID.randomUUID().toString();
final Path objectPath = new Path(parent, objectName);
// Write test file in a specific pattern
assertEquals(fileSize,
generateTestFile(fs, objectPath, fileSize, testBufferSize, modulus));
assertPathExists(fs, "not created successful", objectPath);
// Now read the same file back and verify its content
try {
verifyReceivedData(fs, objectPath, fileSize, testBufferSize, modulus);
} finally {
// Delete test file
fs.delete(objectPath, false);
}
}
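  // Usage sketch (illustrative; the 1 MB size is an arbitrary example):
  //   createAndVerifyFile(fs, new Path("/test"), 1024 * 1024);
  // generates, re-reads and finally deletes a uniquely named file under the
  // given parent directory.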
}
| 30,275 | 32.677419 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
/**
* This class does things to the root directory.
* Only subclass this for tests against transient filesystems where
* you don't care about the data.
*/
public abstract class AbstractContractRootDirectoryTest extends AbstractFSContractTestBase {
private static final Logger LOG =
LoggerFactory.getLogger(AbstractContractRootDirectoryTest.class);
@Override
public void setup() throws Exception {
super.setup();
skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
}
@Test
public void testMkDirDepth1() throws Throwable {
FileSystem fs = getFileSystem();
Path dir = new Path("/testmkdirdepth1");
assertPathDoesNotExist("directory already exists", dir);
fs.mkdirs(dir);
ContractTestUtils.assertIsDirectory(getFileSystem(), dir);
assertPathExists("directory already exists", dir);
assertDeleted(dir, true);
}
@Test
public void testRmEmptyRootDirNonRecursive() throws Throwable {
//extra sanity checks here to avoid support calls about complete loss of data
skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
Path root = new Path("/");
ContractTestUtils.assertIsDirectory(getFileSystem(), root);
boolean deleted = getFileSystem().delete(root, true);
LOG.info("rm / of empty dir result is {}", deleted);
ContractTestUtils.assertIsDirectory(getFileSystem(), root);
}
@Test
public void testRmNonEmptyRootDirNonRecursive() throws Throwable {
//extra sanity checks here to avoid support calls about complete loss of data
skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
Path root = new Path("/");
String touchfile = "/testRmNonEmptyRootDirNonRecursive";
Path file = new Path(touchfile);
ContractTestUtils.touch(getFileSystem(), file);
ContractTestUtils.assertIsDirectory(getFileSystem(), root);
try {
boolean deleted = getFileSystem().delete(root, false);
fail("non recursive delete should have raised an exception," +
" but completed with exit code " + deleted);
} catch (IOException e) {
//expected
handleExpectedException(e);
} finally {
getFileSystem().delete(file, false);
}
ContractTestUtils.assertIsDirectory(getFileSystem(), root);
}
@Test
public void testRmRootRecursive() throws Throwable {
//extra sanity checks here to avoid support calls about complete loss of data
skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
Path root = new Path("/");
ContractTestUtils.assertIsDirectory(getFileSystem(), root);
Path file = new Path("/testRmRootRecursive");
ContractTestUtils.touch(getFileSystem(), file);
boolean deleted = getFileSystem().delete(root, true);
ContractTestUtils.assertIsDirectory(getFileSystem(), root);
LOG.info("rm -rf / result is {}", deleted);
if (deleted) {
assertPathDoesNotExist("expected file to be deleted", file);
} else {
assertPathExists("expected file to be preserved", file);;
}
}
@Test
public void testCreateFileOverRoot() throws Throwable {
Path root = new Path("/");
byte[] dataset = dataset(1024, ' ', 'z');
try {
createFile(getFileSystem(), root, false, dataset);
fail("expected an exception, got a file created over root: " + ls(root));
} catch (IOException e) {
//expected
handleExpectedException(e);
}
assertIsDirectory(root);
}
}
| 4,544 | 35.653226 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract;
/**
* Options for contract tests: keys for FS-specific values,
* defaults.
*/
public interface ContractOptions {
/**
   * Name of the (optional) resource containing filesystem binding keys: {@value}.
   * If found, it will be loaded.
*/
String CONTRACT_OPTIONS_RESOURCE = "contract-test-options.xml";
/**
* Prefix for all contract keys in the configuration files
*/
String FS_CONTRACT_KEY = "fs.contract.";
/**
* Is a filesystem case sensitive.
* Some of the filesystems that say "no" here may mean
* that it varies from platform to platform -the localfs being the key
* example.
*/
String IS_CASE_SENSITIVE = "is-case-sensitive";
/**
* Blobstore flag. Implies it's not a real directory tree and
* consistency is below that which Hadoop expects
*/
String IS_BLOBSTORE = "is-blobstore";
  /**
   * Flag to indicate that the FS can rename into directories that
   * don't exist, creating them as needed.
   * {@value}
   */
  String RENAME_CREATES_DEST_DIRS = "rename-creates-dest-dirs";
  /**
   * Flag to indicate that the FS does not follow the rename contract -and
   * instead only returns false on a failure.
   * {@value}
   */
  String RENAME_OVERWRITES_DEST = "rename-overwrites-dest";
  /**
   * Flag to indicate that the FS returns false if the destination exists.
   * {@value}
   */
  String RENAME_RETURNS_FALSE_IF_DEST_EXISTS =
      "rename-returns-false-if-dest-exists";
  /**
   * Flag to indicate that the FS returns false on a rename
   * if the source is missing.
   * {@value}
   */
  String RENAME_RETURNS_FALSE_IF_SOURCE_MISSING =
      "rename-returns-false-if-source-missing";
  /**
   * Flag to indicate that the FS removes the destination first if it is
   * an empty directory, meaning the FS honors POSIX rename behavior.
   * {@value}
   */
  String RENAME_REMOVE_DEST_IF_EMPTY_DIR = "rename-remove-dest-if-empty-dir";
  /**
   * Flag to indicate that append is supported.
   * {@value}
   */
  String SUPPORTS_APPEND = "supports-append";
  /**
   * Flag to indicate that renames are atomic.
   * {@value}
   */
  String SUPPORTS_ATOMIC_RENAME = "supports-atomic-rename";
  /**
   * Flag to indicate that directory deletes are atomic.
   * {@value}
   */
  String SUPPORTS_ATOMIC_DIRECTORY_DELETE = "supports-atomic-directory-delete";
  /**
   * Does the FS support multiple block locations?
   * {@value}
   */
  String SUPPORTS_BLOCK_LOCALITY = "supports-block-locality";
  /**
   * Does the FS support the concat() operation?
   * {@value}
   */
  String SUPPORTS_CONCAT = "supports-concat";
  /**
   * Is seeking supported at all?
   * {@value}
   */
  String SUPPORTS_SEEK = "supports-seek";
  /**
   * Is a seek past the EOF rejected?
   * {@value}
   */
  String REJECTS_SEEK_PAST_EOF = "rejects-seek-past-eof";
  /**
   * Is seeking on a closed file supported? Some filesystems only raise an
   * exception later, when trying to read.
   * {@value}
   */
  String SUPPORTS_SEEK_ON_CLOSED_FILE = "supports-seek-on-closed-file";
  /**
   * Flag to indicate that this FS expects to throw the strictest
   * exceptions it can, not generic IOEs, which, if returned,
   * must be rejected.
   * {@value}
   */
  String SUPPORTS_STRICT_EXCEPTIONS = "supports-strict-exceptions";
  /**
   * Are unix permissions supported?
   * {@value}
   */
  String SUPPORTS_UNIX_PERMISSIONS = "supports-unix-permissions";
  /**
   * Maximum path length.
   * {@value}
   */
  String MAX_PATH_ = "max-path";
  /**
   * Maximum filesize: 0 or -1 for no limit.
   * {@value}
   */
  String MAX_FILESIZE = "max-filesize";
  /**
   * Flag to indicate that tests on the root directories of a filesystem/
   * object store are permitted.
   * {@value}
   */
  String TEST_ROOT_TESTS_ENABLED = "test.root-tests-enabled";
/**
* Limit for #of random seeks to perform.
* Keep low for remote filesystems for faster tests
*/
String TEST_RANDOM_SEEK_COUNT = "test.random-seek-count";
}
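/* Illustrative binding (a sketch; the values shown are examples, not shipped
 * defaults): a filesystem declares its behavior with FS_CONTRACT_KEY-prefixed
 * properties, e.g. in contract-test-options.xml or its per-FS contract
 * resource:
 *
 *   <property>
 *     <name>fs.contract.supports-concat</name>
 *     <value>true</value>
 *   </property>
 *   <property>
 *     <name>fs.contract.test.root-tests-enabled</name>
 *     <value>false</value>
 *   </property>
 */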
| 4,805 | 26 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;
import org.junit.internal.AssumptionViolatedException;
import java.io.FileNotFoundException;
import java.io.IOException;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset;
import static org.apache.hadoop.fs.contract.ContractTestUtils.writeTextFile;
/**
* Test creating files, overwrite options &c
*/
public abstract class AbstractContractCreateTest extends
AbstractFSContractTestBase {
@Test
public void testCreateNewFile() throws Throwable {
describe("Foundational 'create a file' test");
Path path = path("testCreateNewFile");
byte[] data = dataset(256, 'a', 'z');
writeDataset(getFileSystem(), path, data, data.length, 1024 * 1024, false);
ContractTestUtils.verifyFileContents(getFileSystem(), path, data);
}
@Test
public void testCreateFileOverExistingFileNoOverwrite() throws Throwable {
describe("Verify overwriting an existing file fails");
Path path = path("testCreateFileOverExistingFileNoOverwrite");
byte[] data = dataset(256, 'a', 'z');
writeDataset(getFileSystem(), path, data, data.length, 1024, false);
byte[] data2 = dataset(10 * 1024, 'A', 'Z');
try {
writeDataset(getFileSystem(), path, data2, data2.length, 1024, false);
fail("writing without overwrite unexpectedly succeeded");
} catch (FileAlreadyExistsException expected) {
//expected
handleExpectedException(expected);
} catch (IOException relaxed) {
handleRelaxedException("Creating a file over a file with overwrite==false",
"FileAlreadyExistsException",
relaxed);
}
}
/**
* This test catches some eventual consistency problems that blobstores exhibit,
* as we are implicitly verifying that updates are consistent. This
* is why different file lengths and datasets are used
* @throws Throwable
*/
@Test
public void testOverwriteExistingFile() throws Throwable {
describe("Overwrite an existing file and verify the new data is there");
Path path = path("testOverwriteExistingFile");
byte[] data = dataset(256, 'a', 'z');
writeDataset(getFileSystem(), path, data, data.length, 1024, false);
ContractTestUtils.verifyFileContents(getFileSystem(), path, data);
byte[] data2 = dataset(10 * 1024, 'A', 'Z');
writeDataset(getFileSystem(), path, data2, data2.length, 1024, true);
ContractTestUtils.verifyFileContents(getFileSystem(), path, data2);
}
@Test
public void testOverwriteEmptyDirectory() throws Throwable {
describe("verify trying to create a file over an empty dir fails");
Path path = path("testOverwriteEmptyDirectory");
mkdirs(path);
assertIsDirectory(path);
byte[] data = dataset(256, 'a', 'z');
try {
writeDataset(getFileSystem(), path, data, data.length, 1024, true);
assertIsDirectory(path);
fail("write of file over empty dir succeeded");
} catch (FileAlreadyExistsException expected) {
//expected
handleExpectedException(expected);
} catch (FileNotFoundException e) {
handleRelaxedException("overwriting a dir with a file ",
"FileAlreadyExistsException",
e);
} catch (IOException e) {
handleRelaxedException("overwriting a dir with a file ",
"FileAlreadyExistsException",
e);
}
assertIsDirectory(path);
}
@Test
public void testOverwriteNonEmptyDirectory() throws Throwable {
describe("verify trying to create a file over a non-empty dir fails");
Path path = path("testOverwriteNonEmptyDirectory");
mkdirs(path);
try {
assertIsDirectory(path);
} catch (AssertionError failure) {
if (isSupported(IS_BLOBSTORE)) {
// file/directory hack surfaces here
throw new AssumptionViolatedException(failure.toString()).initCause(failure);
}
// else: rethrow
throw failure;
}
Path child = new Path(path, "child");
writeTextFile(getFileSystem(), child, "child file", true);
byte[] data = dataset(256, 'a', 'z');
try {
writeDataset(getFileSystem(), path, data, data.length, 1024,
true);
FileStatus status = getFileSystem().getFileStatus(path);
boolean isDir = status.isDirectory();
if (!isDir && isSupported(IS_BLOBSTORE)) {
// object store: downgrade to a skip so that the failure is visible
// in test results
skip("Object store allows a file to overwrite a directory");
}
fail("write of file over dir succeeded");
} catch (FileAlreadyExistsException expected) {
//expected
handleExpectedException(expected);
} catch (FileNotFoundException e) {
handleRelaxedException("overwriting a dir with a file ",
"FileAlreadyExistsException",
e);
} catch (IOException e) {
handleRelaxedException("overwriting a dir with a file ",
"FileAlreadyExistsException",
e);
}
assertIsDirectory(path);
assertIsFile(child);
}
@Test
public void testCreatedFileIsImmediatelyVisible() throws Throwable {
describe("verify that a newly created file exists as soon as open returns");
Path path = path("testCreatedFileIsImmediatelyVisible");
FSDataOutputStream out = null;
try {
out = getFileSystem().create(path,
false,
4096,
(short) 1,
1024);
if (!getFileSystem().exists(path)) {
if (isSupported(IS_BLOBSTORE)) {
// object store: downgrade to a skip so that the failure is visible
// in test results
skip("Filesystem is an object store and newly created files are not immediately visible");
}
assertPathExists("expected path to be visible before anything written",
path);
}
} finally {
IOUtils.closeStream(out);
}
}
}
| 7,413 | 38.43617 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractBondedFSContract.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
/**
* This is a filesystem contract for any class that bonds to a filesystem
* through the configuration.
*
* It looks for a definition of the test filesystem with the key
* derived from "fs.contract.test.fs.%s" -if found the value
* is converted to a URI and used to create a filesystem. If not -the
* tests are not enabled
*/
public abstract class AbstractBondedFSContract extends AbstractFSContract {
private static final Log LOG =
LogFactory.getLog(AbstractBondedFSContract.class);
/**
* Pattern for the option for test filesystems from schema
*/
public static final String FSNAME_OPTION = "test.fs.%s";
/**
* Constructor: loads the authentication keys if found
* @param conf configuration to work with
*/
protected AbstractBondedFSContract(Configuration conf) {
super(conf);
}
private String fsName;
private URI fsURI;
private FileSystem filesystem;
@Override
public void init() throws IOException {
super.init();
//this test is only enabled if the test FS is present
fsName = loadFilesystemName(getScheme());
setEnabled(!fsName.isEmpty());
if (isEnabled()) {
try {
fsURI = new URI(fsName);
filesystem = FileSystem.get(fsURI, getConf());
} catch (URISyntaxException e) {
throw new IOException("Invalid URI " + fsName);
} catch (IllegalArgumentException e) {
throw new IOException("Invalid URI " + fsName, e);
}
} else {
LOG.info("skipping tests as FS name is not defined in "
+ getFilesystemConfKey());
}
}
/**
* Load the name of a test filesystem.
* @param schema schema to look up
* @return the filesystem name -or "" if none was defined
*/
public String loadFilesystemName(String schema) {
return getOption(String.format(FSNAME_OPTION, schema), "");
}
/**
* Get the conf key for a filesystem
*/
protected String getFilesystemConfKey() {
return getConfKey(String.format(FSNAME_OPTION, getScheme()));
}
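  // Illustrative resolution (a sketch; "s3a" is just an example scheme):
  //   String key = getConfKey(String.format(FSNAME_OPTION, "s3a"));
  //   // key == "fs.contract.test.fs.s3a"; point it at a URI such as
  //   // "s3a://test-bucket/" to enable these tests for that store.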
@Override
public FileSystem getTestFileSystem() throws IOException {
return filesystem;
}
@Override
public Path getTestPath() {
Path path = new Path("/test");
return path;
}
@Override
public String toString() {
return getScheme() +" Contract against " + fsName;
}
}
| 3,457 | 28.810345 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
import java.io.IOException;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
/**
* Test directory operations
*/
public abstract class AbstractContractMkdirTest extends AbstractFSContractTestBase {
@Test
public void testMkDirRmDir() throws Throwable {
FileSystem fs = getFileSystem();
Path dir = path("testMkDirRmDir");
assertPathDoesNotExist("directory already exists", dir);
fs.mkdirs(dir);
assertPathExists("mkdir failed", dir);
assertDeleted(dir, false);
}
@Test
public void testMkDirRmRfDir() throws Throwable {
describe("create a directory then recursive delete it");
FileSystem fs = getFileSystem();
Path dir = path("testMkDirRmRfDir");
assertPathDoesNotExist("directory already exists", dir);
fs.mkdirs(dir);
assertPathExists("mkdir failed", dir);
assertDeleted(dir, true);
}
@Test
public void testNoMkdirOverFile() throws Throwable {
describe("try to mkdir over a file");
FileSystem fs = getFileSystem();
Path path = path("testNoMkdirOverFile");
byte[] dataset = dataset(1024, ' ', 'z');
createFile(getFileSystem(), path, false, dataset);
try {
boolean made = fs.mkdirs(path);
fail("mkdirs did not fail over a file but returned " + made
+ "; " + ls(path));
} catch (ParentNotDirectoryException e) {
//parent is a directory
handleExpectedException(e);
} catch (FileAlreadyExistsException e) {
//also allowed as an exception (HDFS)
      handleExpectedException(e);
} catch (IOException e) {
//here the FS says "no create"
handleRelaxedException("mkdirs", "FileAlreadyExistsException", e);
}
assertIsFile(path);
byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), path,
dataset.length);
ContractTestUtils.compareByteArrays(dataset, bytes, dataset.length);
assertPathExists("mkdir failed", path);
assertDeleted(path, true);
}
@Test
public void testMkdirOverParentFile() throws Throwable {
describe("try to mkdir where a parent is a file");
FileSystem fs = getFileSystem();
Path path = path("testMkdirOverParentFile");
byte[] dataset = dataset(1024, ' ', 'z');
createFile(getFileSystem(), path, false, dataset);
Path child = new Path(path,"child-to-mkdir");
try {
boolean made = fs.mkdirs(child);
fail("mkdirs did not fail over a file but returned " + made
+ "; " + ls(path));
} catch (ParentNotDirectoryException e) {
//parent is a directory
handleExpectedException(e);
} catch (FileAlreadyExistsException e) {
handleExpectedException(e);
} catch (IOException e) {
handleRelaxedException("mkdirs", "ParentNotDirectoryException", e);
}
assertIsFile(path);
byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), path,
dataset.length);
ContractTestUtils.compareByteArrays(dataset, bytes, dataset.length);
assertPathExists("mkdir failed", path);
assertDeleted(path, true);
}
@Test
public void testMkdirSlashHandling() throws Throwable {
describe("verify mkdir slash handling");
FileSystem fs = getFileSystem();
// No trailing slash
assertTrue(fs.mkdirs(path("testmkdir/a")));
assertPathExists("mkdir without trailing slash failed",
path("testmkdir/a"));
// With trailing slash
assertTrue(fs.mkdirs(path("testmkdir/b/")));
assertPathExists("mkdir with trailing slash failed", path("testmkdir/b/"));
// Mismatched slashes
assertPathExists("check path existence without trailing slash failed",
path("testmkdir/b"));
}
}
| 4,877 | 35.133333 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.junit.Test;
import java.io.IOException;
/**
* Test creating files, overwrite options &c
*/
public abstract class AbstractContractDeleteTest extends
AbstractFSContractTestBase {
@Test
public void testDeleteEmptyDirNonRecursive() throws Throwable {
Path path = path("testDeleteEmptyDirNonRecursive");
mkdirs(path);
assertDeleted(path, false);
}
@Test
public void testDeleteEmptyDirRecursive() throws Throwable {
Path path = path("testDeleteEmptyDirRecursive");
mkdirs(path);
assertDeleted(path, true);
}
@Test
public void testDeleteNonexistentPathRecursive() throws Throwable {
Path path = path("testDeleteNonexistentPathRecursive");
ContractTestUtils.assertPathDoesNotExist(getFileSystem(), "leftover", path);
ContractTestUtils.rejectRootOperation(path);
assertFalse("Returned true attempting to delete"
+ " a nonexistent path " + path,
getFileSystem().delete(path, false));
}
@Test
public void testDeleteNonexistentPathNonRecursive() throws Throwable {
Path path = path("testDeleteNonexistentPathNonRecursive");
ContractTestUtils.assertPathDoesNotExist(getFileSystem(), "leftover", path);
ContractTestUtils.rejectRootOperation(path);
assertFalse("Returned true attempting to recursively delete"
+ " a nonexistent path " + path,
getFileSystem().delete(path, false));
}
@Test
public void testDeleteNonEmptyDirNonRecursive() throws Throwable {
Path path = path("testDeleteNonEmptyDirNonRecursive");
mkdirs(path);
Path file = new Path(path, "childfile");
ContractTestUtils.writeTextFile(getFileSystem(), file, "goodbye, world",
true);
try {
ContractTestUtils.rejectRootOperation(path);
boolean deleted = getFileSystem().delete(path, false);
fail("non recursive delete should have raised an exception," +
" but completed with exit code " + deleted);
} catch (IOException expected) {
//expected
handleExpectedException(expected);
}
ContractTestUtils.assertIsDirectory(getFileSystem(), path);
}
@Test
public void testDeleteNonEmptyDirRecursive() throws Throwable {
Path path = path("testDeleteNonEmptyDirNonRecursive");
mkdirs(path);
Path file = new Path(path, "childfile");
ContractTestUtils.writeTextFile(getFileSystem(), file, "goodbye, world",
true);
assertDeleted(path, true);
ContractTestUtils.assertPathDoesNotExist(getFileSystem(), "not deleted", file);
}
@Test
public void testDeleteDeepEmptyDir() throws Throwable {
mkdirs(path("testDeleteDeepEmptyDir/d1/d2/d3/d4"));
assertDeleted(path("testDeleteDeepEmptyDir/d1/d2/d3"), true);
FileSystem fs = getFileSystem();
ContractTestUtils.assertPathDoesNotExist(fs,
"not deleted", path("testDeleteDeepEmptyDir/d1/d2/d3/d4"));
ContractTestUtils.assertPathDoesNotExist(fs,
"not deleted", path("testDeleteDeepEmptyDir/d1/d2/d3"));
ContractTestUtils.assertPathExists(fs, "parent dir is deleted",
path("testDeleteDeepEmptyDir/d1/d2"));
}
@Test
public void testDeleteSingleFile() throws Throwable {
// Test delete of just a file
Path path = path("testDeleteSingleFile/d1/d2");
mkdirs(path);
Path file = new Path(path, "childfile");
ContractTestUtils.writeTextFile(getFileSystem(), file,
"single file to be deleted.", true);
ContractTestUtils.assertPathExists(getFileSystem(),
"single file not created", file);
assertDeleted(file, false);
}
}
| 4,618 | 35.952 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContract.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
/**
* Class representing a filesystem contract that a filesystem
 * implementation is expected to implement.
*
* Part of this contract class is to allow FS implementations to
* provide specific opt outs and limits, so that tests can be
* skip unsupported features (e.g. case sensitivity tests),
* dangerous operations (e.g. trying to delete the root directory),
* and limit filesize and other numeric variables for scale tests
*/
public abstract class AbstractFSContract extends Configured {
private static final Logger LOG =
LoggerFactory.getLogger(AbstractFSContract.class);
private boolean enabled = true;
/**
* Constructor: loads the authentication keys if found
* @param conf configuration to work with
*/
protected AbstractFSContract(Configuration conf) {
super(conf);
if (maybeAddConfResource(ContractOptions.CONTRACT_OPTIONS_RESOURCE)) {
LOG.debug("Loaded authentication keys from {}", ContractOptions.CONTRACT_OPTIONS_RESOURCE);
} else {
LOG.debug("Not loaded: {}", ContractOptions.CONTRACT_OPTIONS_RESOURCE);
}
}
/**
* Any initialisation logic can go here
* @throws IOException IO problems
*/
public void init() throws IOException {
}
/**
* Add a configuration resource to this instance's configuration
* @param resource resource reference
* @throws AssertionError if the resource was not found.
*/
protected void addConfResource(String resource) {
boolean found = maybeAddConfResource(resource);
Assert.assertTrue("Resource not found " + resource, found);
}
/**
* Add a configuration resource to this instance's configuration,
* return true if the resource was found
* @param resource resource reference
*/
protected boolean maybeAddConfResource(String resource) {
URL url = this.getClass().getClassLoader().getResource(resource);
boolean found = url != null;
if (found) {
getConf().addResource(resource);
}
return found;
}
/**
* Get the FS from a URI. The default implementation just retrieves
 * it from the normal FileSystem factory/cache, with the local configuration
* @param uri URI of FS
* @return the filesystem
* @throws IOException IO problems
*/
public FileSystem getFileSystem(URI uri) throws IOException {
return FileSystem.get(uri, getConf());
}
/**
* Get the filesystem for these tests
* @return the test fs
* @throws IOException IO problems
*/
public abstract FileSystem getTestFileSystem() throws IOException;
/**
* Get the scheme of this FS
* @return the scheme this FS supports
*/
public abstract String getScheme();
/**
* Return the path string for tests, e.g. <code>file:///tmp</code>
* @return a path in the test FS
*/
public abstract Path getTestPath();
/**
   * Boolean to indicate whether or not the contract tests are enabled
* for this test run.
* @return true if the tests can be run.
*/
public boolean isEnabled() {
return enabled;
}
/**
   * Boolean to indicate whether or not the contract tests are enabled
* for this test run.
* @param enabled flag which must be true if the tests can be run.
*/
public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
/**
   * Query for a feature being supported. This may include a probe for the feature.
   *
   * @param feature feature to query
   * @param defval default value
   * @return true if the feature is supported
*/
public boolean isSupported(String feature, boolean defval) {
return getConf().getBoolean(getConfKey(feature), defval);
}
/**
   * Query for a feature's limit. This may include a probe for the feature.
   *
   * @param feature feature to query
   * @param defval default value
   * @return the feature's limit, or the default value if unset
*/
public int getLimit(String feature, int defval) {
return getConf().getInt(getConfKey(feature), defval);
}
public String getOption(String feature, String defval) {
return getConf().get(getConfKey(feature), defval);
}
/**
* Build a configuration key
* @param feature feature to query
* @return the configuration key base with the feature appended
*/
public String getConfKey(String feature) {
return ContractOptions.FS_CONTRACT_KEY + feature;
}
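  // Illustrative use (a sketch): a capability probe combines the prefix with
  // the feature name before the boolean lookup, e.g.
  //   getConfKey(ContractOptions.SUPPORTS_CONCAT) -> "fs.contract.supports-concat"
  //   isSupported(ContractOptions.SUPPORTS_CONCAT, false)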
/**
* Create a URI off the scheme
* @param path path of URI
* @return a URI
* @throws IOException if the URI could not be created
*/
protected URI toURI(String path) throws IOException {
try {
      return new URI(getScheme(), path, null);
} catch (URISyntaxException e) {
throw new IOException(e.toString() + " with " + path, e);
}
}
@Override
public String toString() {
return "FSContract for " + getScheme();
}
}
| 6,060 | 29.00495 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.internal.AssumptionViolatedException;
import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URI;
import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup;
import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
/**
* This is the base class for all the contract tests
*/
public abstract class AbstractFSContractTestBase extends Assert
implements ContractOptions {
private static final Logger LOG =
LoggerFactory.getLogger(AbstractFSContractTestBase.class);
/**
* Length of files to work with: {@value}
*/
public static final int TEST_FILE_LEN = 1024;
/**
* standard test timeout: {@value}
*/
public static final int DEFAULT_TEST_TIMEOUT = 180 * 1000;
/**
   * The FS contract used for these tests
*/
private AbstractFSContract contract;
/**
* The test filesystem extracted from it
*/
private FileSystem fileSystem;
/**
* The path for tests
*/
private Path testPath;
/**
* This must be implemented by all instantiated test cases
* -provide the FS contract
* @return the FS contract
*/
protected abstract AbstractFSContract createContract(Configuration conf);
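  /* Illustrative subclass (a sketch; "LocalFSContract" is assumed to exist in
   * the matching test module):
   *
   *   public class TestLocalFSContractMkdir extends AbstractContractMkdirTest {
   *     @Override
   *     protected AbstractFSContract createContract(Configuration conf) {
   *       return new LocalFSContract(conf);
   *     }
   *   }
   */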
/**
* Get the contract
* @return the contract, which will be non-null once the setup operation has
* succeeded
*/
protected AbstractFSContract getContract() {
return contract;
}
/**
* Get the filesystem created in startup
* @return the filesystem to use for tests
*/
public FileSystem getFileSystem() {
return fileSystem;
}
/**
* Get the log of the base class
* @return a logger
*/
public static Logger getLog() {
return LOG;
}
/**
* Skip a test if a feature is unsupported in this FS
* @param feature feature to look for
* @throws IOException IO problem
*/
protected void skipIfUnsupported(String feature) throws IOException {
if (!isSupported(feature)) {
skip("Skipping as unsupported feature: " + feature);
}
}
/**
* Is a feature supported?
* @param feature feature
* @return true iff the feature is supported
* @throws IOException IO problems
*/
protected boolean isSupported(String feature) throws IOException {
return contract.isSupported(feature, false);
}
/**
* Include at the start of tests to skip them if the FS is not enabled.
*/
protected void assumeEnabled() {
    if (!contract.isEnabled()) {
      throw new AssumptionViolatedException("test cases disabled for " + contract);
    }
}
/**
* Create a configuration. May be overridden by tests/instantiations
* @return a configuration
*/
protected Configuration createConfiguration() {
return new Configuration();
}
/**
* Set the timeout for every test
*/
@Rule
public Timeout testTimeout = new Timeout(getTestTimeoutMillis());
/**
* Option for tests to override the default timeout value
* @return the current test timeout
*/
protected int getTestTimeoutMillis() {
return DEFAULT_TEST_TIMEOUT;
}
/**
* Setup: create the contract then init it
* @throws Exception on any failure
*/
@Before
public void setup() throws Exception {
contract = createContract(createConfiguration());
contract.init();
//skip tests if they aren't enabled
assumeEnabled();
//extract the test FS
fileSystem = contract.getTestFileSystem();
assertNotNull("null filesystem", fileSystem);
URI fsURI = fileSystem.getUri();
LOG.info("Test filesystem = {} implemented by {}",
fsURI, fileSystem);
//sanity check to make sure that the test FS picked up really matches
//the scheme chosen. This is to avoid defaulting back to the localFS
//which would be drastic for root FS tests
assertEquals("wrong filesystem of " + fsURI,
contract.getScheme(), fsURI.getScheme());
//create the test path
testPath = getContract().getTestPath();
mkdirs(testPath);
}
/**
* Teardown
* @throws Exception on any failure
*/
@After
public void teardown() throws Exception {
deleteTestDirInTeardown();
}
/**
* Delete the test dir in the per-test teardown
* @throws IOException
*/
protected void deleteTestDirInTeardown() throws IOException {
cleanup("TEARDOWN", getFileSystem(), testPath);
}
/**
* Create a path under the test path provided by
* the FS contract
* @param filepath path string in
* @return a path qualified by the test filesystem
* @throws IOException IO problems
*/
protected Path path(String filepath) throws IOException {
return getFileSystem().makeQualified(
new Path(getContract().getTestPath(), filepath));
}
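  // Illustrative qualification (a sketch): with a test path of "/test" on the
  // local filesystem, path("dir/file") resolves to something like
  // "file:///test/dir/file"; the scheme and authority come from the bound FS.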
/**
* Take a simple path like "/something" and turn it into
* a qualified path against the test FS
* @param filepath path string in
* @return a path qualified by the test filesystem
* @throws IOException IO problems
*/
protected Path absolutepath(String filepath) throws IOException {
return getFileSystem().makeQualified(new Path(filepath));
}
/**
* List a path in the test FS
* @param path path to list
* @return the contents of the path/dir
* @throws IOException IO problems
*/
protected String ls(Path path) throws IOException {
return ContractTestUtils.ls(fileSystem, path);
}
/**
* Describe a test. This is a replacement for javadocs
   * where the test's role is printed in the log output.
* @param text description
*/
protected void describe(String text) {
LOG.info(text);
}
/**
* Handle the outcome of an operation not being the strictest
* exception desired, but one that, while still within the boundary
* of the contract, is a bit looser.
*
* If the FS contract says that they support the strictest exceptions,
* that is what they must return, and the exception here is rethrown
* @param action Action
* @param expectedException what was expected
* @param e exception that was received
*/
protected void handleRelaxedException(String action,
String expectedException,
Exception e) throws Exception {
if (getContract().isSupported(SUPPORTS_STRICT_EXCEPTIONS, false)) {
throw e;
}
LOG.warn("The expected exception {} was not the exception class" +
" raised on {}: {}", action , e.getClass(), expectedException, e);
}
/**
* Handle expected exceptions through logging and/or other actions
* @param e exception raised.
*/
protected void handleExpectedException(Exception e) {
getLog().debug("expected :{}" ,e, e);
}
/**
* assert that a path exists
* @param message message to use in an assertion
* @param path path to probe
* @throws IOException IO problems
*/
public void assertPathExists(String message, Path path) throws IOException {
ContractTestUtils.assertPathExists(fileSystem, message, path);
}
/**
   * assert that a path does not exist
* @param message message to use in an assertion
* @param path path to probe
* @throws IOException IO problems
*/
public void assertPathDoesNotExist(String message, Path path) throws
IOException {
ContractTestUtils.assertPathDoesNotExist(fileSystem, message, path);
}
/**
   * Assert that a file exists and its {@link FileStatus} entry
* declares that this is a file and not a symlink or directory.
*
* @param filename name of the file
* @throws IOException IO problems during file operations
*/
protected void assertIsFile(Path filename) throws IOException {
ContractTestUtils.assertIsFile(fileSystem, filename);
}
/**
   * Assert that a path exists and its {@link FileStatus} entry
   * declares that it is a directory and not a symlink or file.
   *
   * @param path path to the directory
* @throws IOException IO problems during file operations
*/
protected void assertIsDirectory(Path path) throws IOException {
ContractTestUtils.assertIsDirectory(fileSystem, path);
}
/**
   * Create a directory, asserting that {@code mkdirs()} returned true.
   *
   * @param path directory to create
   * @throws IOException IO problems during directory operations
*/
protected void mkdirs(Path path) throws IOException {
assertTrue("Failed to mkdir " + path, fileSystem.mkdirs(path));
}
/**
* Assert that a delete succeeded
* @param path path to delete
* @param recursive recursive flag
* @throws IOException IO problems
*/
protected void assertDeleted(Path path, boolean recursive) throws
IOException {
ContractTestUtils.assertDeleted(fileSystem, path, recursive);
}
/**
   * Assert that the result value == -1, which implies
   * that the read reached the end of the stream.
* @param text text to include in a message (usually the operation)
* @param result read result to validate
*/
protected void assertMinusOne(String text, int result) {
assertEquals(text + " wrong read result " + result, -1, result);
}
boolean rename(Path src, Path dst) throws IOException {
return getFileSystem().rename(src, dst);
}
protected String generateAndLogErrorListing(Path src, Path dst) throws
IOException {
FileSystem fs = getFileSystem();
getLog().error(
"src dir " + ContractTestUtils.ls(fs, src.getParent()));
String destDirLS = ContractTestUtils.ls(fs, dst.getParent());
if (fs.isDirectory(dst)) {
//include the dir into the listing
destDirLS = destDirLS + "\n" + ContractTestUtils.ls(fs, dst);
}
return destDirLS;
}
}
| 10,969 | 29.137363 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.contract.ContractTestUtils.assertFileHasLength;
import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
/**
* Test concat -if supported
*/
public abstract class AbstractContractConcatTest extends AbstractFSContractTestBase {
private static final Logger LOG =
LoggerFactory.getLogger(AbstractContractConcatTest.class);
private Path testPath;
private Path srcFile;
private Path zeroByteFile;
private Path target;
@Override
public void setup() throws Exception {
super.setup();
skipIfUnsupported(SUPPORTS_CONCAT);
    //set up the paths used by these tests
testPath = path("test");
srcFile = new Path(testPath, "small.txt");
zeroByteFile = new Path(testPath, "zero.txt");
target = new Path(testPath, "target");
byte[] block = dataset(TEST_FILE_LEN, 0, 255);
createFile(getFileSystem(), srcFile, false, block);
touch(getFileSystem(), zeroByteFile);
}
@Test
public void testConcatEmptyFiles() throws Throwable {
touch(getFileSystem(), target);
try {
getFileSystem().concat(target, new Path[0]);
fail("expected a failure");
} catch (Exception e) {
//expected
handleExpectedException(e);
}
}
@Test
public void testConcatMissingTarget() throws Throwable {
try {
getFileSystem().concat(target,
new Path[] { zeroByteFile});
fail("expected a failure");
} catch (Exception e) {
//expected
handleExpectedException(e);
}
}
@Test
public void testConcatFileOnFile() throws Throwable {
byte[] block = dataset(TEST_FILE_LEN, 0, 255);
createFile(getFileSystem(), target, false, block);
getFileSystem().concat(target,
new Path[] {srcFile});
    assertFileHasLength(getFileSystem(), target, TEST_FILE_LEN * 2);
ContractTestUtils.validateFileContent(
ContractTestUtils.readDataset(getFileSystem(),
target, TEST_FILE_LEN * 2),
new byte[][]{block, block});
}
@Test
public void testConcatOnSelf() throws Throwable {
byte[] block = dataset(TEST_FILE_LEN, 0, 255);
createFile(getFileSystem(), target, false, block);
try {
      getFileSystem().concat(target,
          new Path[]{target});
      fail("expected a failure");
} catch (Exception e) {
//expected
handleExpectedException(e);
}
}
}
| 3,602 | 30.884956 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.EOFException;
import java.io.IOException;
import java.util.Random;
import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
import static org.apache.hadoop.fs.contract.ContractTestUtils.verifyRead;
/**
* Test Seek operations
*/
public abstract class AbstractContractSeekTest extends AbstractFSContractTestBase {
private static final Logger LOG =
LoggerFactory.getLogger(AbstractContractSeekTest.class);
public static final int DEFAULT_RANDOM_SEEK_COUNT = 100;
private Path testPath;
private Path smallSeekFile;
private Path zeroByteFile;
private FSDataInputStream instream;
@Override
public void setup() throws Exception {
super.setup();
skipIfUnsupported(SUPPORTS_SEEK);
//delete the test directory
testPath = getContract().getTestPath();
smallSeekFile = path("seekfile.txt");
zeroByteFile = path("zero.txt");
byte[] block = dataset(TEST_FILE_LEN, 0, 255);
//this file now has a simple rule: offset => value
createFile(getFileSystem(), smallSeekFile, false, block);
touch(getFileSystem(), zeroByteFile);
}
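  /*
   * For reference, a sketch of the "offset => value" rule assumed above.
   * ContractTestUtils.dataset(len, base, modulo) is taken here to produce
   * data[i] == (byte) (base + (i % modulo)), so with (TEST_FILE_LEN, 0, 255)
   * the byte at offset i is simply i % 255. This is an illustrative
   * assumption about the utility, not its actual source.
   */
  private static byte[] datasetSketch(int len, int base, int modulo) {
    byte[] data = new byte[len];
    for (int i = 0; i < len; i++) {
      //each position is derived from its offset, making seeks verifiable
      data[i] = (byte) (base + (i % modulo));
    }
    return data;
  }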
@Override
protected Configuration createConfiguration() {
Configuration conf = super.createConfiguration();
conf.setInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, 4096);
return conf;
}
@Override
public void teardown() throws Exception {
IOUtils.closeStream(instream);
instream = null;
super.teardown();
}
@Test
public void testSeekZeroByteFile() throws Throwable {
describe("seek and read a 0 byte file");
instream = getFileSystem().open(zeroByteFile);
assertEquals(0, instream.getPos());
    //expect initial read to fail
int result = instream.read();
assertMinusOne("initial byte read", result);
byte[] buffer = new byte[1];
//expect that seek to 0 works
instream.seek(0);
    //reread, expect the same -1 result
result = instream.read();
assertMinusOne("post-seek byte read", result);
result = instream.read(buffer, 0, 1);
assertMinusOne("post-seek buffer read", result);
}
@Test
public void testBlockReadZeroByteFile() throws Throwable {
describe("do a block read on a 0 byte file");
instream = getFileSystem().open(zeroByteFile);
assertEquals(0, instream.getPos());
    //a block read on an empty file must return -1
byte[] buffer = new byte[1];
int result = instream.read(buffer, 0, 1);
assertMinusOne("block read zero byte file", result);
}
/**
* Seek and read on a closed file.
* Some filesystems let callers seek on a closed file -these must
* still fail on the subsequent reads.
* @throws Throwable
*/
@Test
public void testSeekReadClosedFile() throws Throwable {
boolean supportsSeekOnClosedFiles = isSupported(SUPPORTS_SEEK_ON_CLOSED_FILE);
instream = getFileSystem().open(smallSeekFile);
getLog().debug(
"Stream is of type " + instream.getClass().getCanonicalName());
instream.close();
try {
instream.seek(0);
if (!supportsSeekOnClosedFiles) {
fail("seek succeeded on a closed stream");
}
} catch (IOException e) {
//expected a closed file
}
try {
      int data = instream.available();
      fail("available() succeeded on a closed stream, got " + data);
} catch (IOException e) {
//expected a closed file
}
try {
int data = instream.read();
fail("read() succeeded on a closed stream, got " + data);
} catch (IOException e) {
//expected a closed file
}
try {
byte[] buffer = new byte[1];
int result = instream.read(buffer, 0, 1);
fail("read(buffer, 0, 1) succeeded on a closed stream, got " + result);
} catch (IOException e) {
//expected a closed file
}
//what position does a closed file have?
try {
long offset = instream.getPos();
} catch (IOException e) {
      // it's valid to raise an error here; but the test is applied to make
      // sure there's no other exception like an NPE.
}
//and close again
instream.close();
}
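  /*
   * A minimal sketch of how the repeated "must fail on a closed stream"
   * pattern above could be factored out. The interface and method names
   * here are assumptions for illustration; they are not part of the
   * contract test API. Usage (assuming Java 8 lambdas):
   * assertFailsWhenClosed("read()", () -> instream.read());
   */
  private interface ClosedStreamOp {
    void apply() throws IOException;
  }
  private void assertFailsWhenClosed(String operation, ClosedStreamOp op) {
    try {
      op.apply();
      fail(operation + " succeeded on a closed stream");
    } catch (IOException expected) {
      //expected: the stream is closed
    }
  }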
@Test
public void testNegativeSeek() throws Throwable {
instream = getFileSystem().open(smallSeekFile);
assertEquals(0, instream.getPos());
try {
instream.seek(-1);
long p = instream.getPos();
LOG.warn("Seek to -1 returned a position of " + p);
int result = instream.read();
fail(
"expected an exception, got data " + result + " at a position of " + p);
} catch (EOFException e) {
//bad seek -expected
handleExpectedException(e);
} catch (IOException e) {
//bad seek -expected, but not as preferred as an EOFException
handleRelaxedException("a negative seek", "EOFException", e);
}
assertEquals(0, instream.getPos());
}
@Test
public void testSeekFile() throws Throwable {
describe("basic seek operations");
instream = getFileSystem().open(smallSeekFile);
assertEquals(0, instream.getPos());
//expect that seek to 0 works
instream.seek(0);
int result = instream.read();
assertEquals(0, result);
assertEquals(1, instream.read());
assertEquals(2, instream.getPos());
assertEquals(2, instream.read());
assertEquals(3, instream.getPos());
instream.seek(128);
assertEquals(128, instream.getPos());
assertEquals(128, instream.read());
instream.seek(63);
assertEquals(63, instream.read());
}
@Test
public void testSeekAndReadPastEndOfFile() throws Throwable {
describe("verify that reading past the last bytes in the file returns -1");
instream = getFileSystem().open(smallSeekFile);
assertEquals(0, instream.getPos());
    //go just before the end
instream.seek(TEST_FILE_LEN - 2);
assertTrue("Premature EOF", instream.read() != -1);
assertTrue("Premature EOF", instream.read() != -1);
assertMinusOne("read past end of file", instream.read());
}
@Test
public void testSeekPastEndOfFileThenReseekAndRead() throws Throwable {
describe("do a seek past the EOF, then verify the stream recovers");
instream = getFileSystem().open(smallSeekFile);
    //go past the end. This may or may not fail; it may be delayed until the
    //read
boolean canSeekPastEOF =
!getContract().isSupported(ContractOptions.REJECTS_SEEK_PAST_EOF, true);
try {
instream.seek(TEST_FILE_LEN + 1);
//if this doesn't trigger, then read() is expected to fail
assertMinusOne("read after seeking past EOF", instream.read());
} catch (EOFException e) {
//This is an error iff the FS claims to be able to seek past the EOF
if (canSeekPastEOF) {
//a failure wasn't expected
throw e;
}
handleExpectedException(e);
} catch (IOException e) {
//This is an error iff the FS claims to be able to seek past the EOF
if (canSeekPastEOF) {
//a failure wasn't expected
throw e;
}
handleRelaxedException("a seek past the end of the file",
"EOFException", e);
}
//now go back and try to read from a valid point in the file
instream.seek(1);
assertTrue("Premature EOF", instream.read() != -1);
}
/**
* Seek round a file bigger than IO buffers
* @throws Throwable
*/
@Test
public void testSeekBigFile() throws Throwable {
describe("Seek round a large file and verify the bytes are what is expected");
Path testSeekFile = path("bigseekfile.txt");
byte[] block = dataset(65536, 0, 255);
createFile(getFileSystem(), testSeekFile, false, block);
instream = getFileSystem().open(testSeekFile);
assertEquals(0, instream.getPos());
//expect that seek to 0 works
instream.seek(0);
int result = instream.read();
assertEquals(0, result);
assertEquals(1, instream.read());
assertEquals(2, instream.read());
//do seek 32KB ahead
instream.seek(32768);
assertEquals("@32768", block[32768], (byte) instream.read());
instream.seek(40000);
assertEquals("@40000", block[40000], (byte) instream.read());
instream.seek(8191);
assertEquals("@8191", block[8191], (byte) instream.read());
instream.seek(0);
assertEquals("@0", 0, (byte) instream.read());
}
@Test
public void testPositionedBulkReadDoesntChangePosition() throws Throwable {
describe(
"verify that a positioned read does not change the getPos() value");
Path testSeekFile = path("bigseekfile.txt");
byte[] block = dataset(65536, 0, 255);
createFile(getFileSystem(), testSeekFile, false, block);
instream = getFileSystem().open(testSeekFile);
instream.seek(39999);
assertTrue(-1 != instream.read());
assertEquals(40000, instream.getPos());
byte[] readBuffer = new byte[256];
    instream.readFully(128, readBuffer, 0, readBuffer.length);
    //the positioned read must not have moved the stream's position
assertEquals(40000, instream.getPos());
//content is the same too
assertEquals("@40000", block[40000], (byte) instream.read());
//now verify the picked up data
for (int i = 0; i < 256; i++) {
assertEquals("@" + i, block[i + 128], readBuffer[i]);
}
}
/**
* Lifted from TestLocalFileSystem:
* Regression test for HADOOP-9307: BufferedFSInputStream returning
* wrong results after certain sequences of seeks and reads.
*/
@Test
public void testRandomSeeks() throws Throwable {
int limit = getContract().getLimit(TEST_RANDOM_SEEK_COUNT,
DEFAULT_RANDOM_SEEK_COUNT);
describe("Testing " + limit + " random seeks");
int filesize = 10 * 1024;
byte[] buf = dataset(filesize, 0, 255);
Path randomSeekFile = path("testrandomseeks.bin");
createFile(getFileSystem(), randomSeekFile, false, buf);
Random r = new Random();
FSDataInputStream stm = getFileSystem().open(randomSeekFile);
// Record the sequence of seeks and reads which trigger a failure.
int[] seeks = new int[10];
int[] reads = new int[10];
try {
for (int i = 0; i < limit; i++) {
int seekOff = r.nextInt(buf.length);
int toRead = r.nextInt(Math.min(buf.length - seekOff, 32000));
seeks[i % seeks.length] = seekOff;
reads[i % reads.length] = toRead;
verifyRead(stm, buf, seekOff, toRead);
}
} catch (AssertionError afe) {
StringBuilder sb = new StringBuilder();
sb.append("Sequence of actions:\n");
for (int j = 0; j < seeks.length; j++) {
sb.append("seek @ ").append(seeks[j]).append(" ")
.append("read ").append(reads[j]).append("\n");
}
LOG.error(sb.toString());
throw afe;
} finally {
stm.close();
}
}
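  /*
   * For context: a rough sketch of what ContractTestUtils.verifyRead() is
   * expected to check for each (seek, read) pair above -seek to the offset,
   * fill the requested range, and compare byte-for-byte against the
   * reference buffer. This is an assumption for illustration, not the
   * actual utility implementation.
   */
  private static void verifyReadSketch(FSDataInputStream stm, byte[] ref,
      int seekOff, int toRead) throws IOException {
    stm.seek(seekOff);
    byte[] out = new byte[toRead];
    //readFully loops until the whole range has been read
    stm.readFully(out, 0, toRead);
    for (int i = 0; i < toRead; i++) {
      assertEquals("@" + (seekOff + i), ref[seekOff + i], out[i]);
    }
  }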
}
| 12,058 | 33.553009 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ftp/TestFTPContractRename.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.ftp;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.ftp.FTPFileSystem;
import java.io.IOException;
public class TestFTPContractRename extends AbstractContractRenameTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new FTPContract(conf);
}
/**
* Check the exception was about cross-directory renames
* -if not, rethrow it.
* @param e exception raised
* @throws IOException
*/
private void verifyUnsupportedDirRenameException(IOException e) throws IOException {
if (!e.toString().contains(FTPFileSystem.E_SAME_DIRECTORY_ONLY)) {
throw e;
}
}
@Override
public void testRenameDirIntoExistingDir() throws Throwable {
try {
super.testRenameDirIntoExistingDir();
fail("Expected a failure");
} catch (IOException e) {
verifyUnsupportedDirRenameException(e);
}
}
@Override
public void testRenameFileNonexistentDir() throws Throwable {
try {
super.testRenameFileNonexistentDir();
fail("Expected a failure");
} catch (IOException e) {
verifyUnsupportedDirRenameException(e);
}
}
}
| 2,140 | 30.955224 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ftp/TestFTPContractDelete.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.ftp;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestFTPContractDelete extends AbstractContractDeleteTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new FTPContract(conf);
}
}
| 1,226 | 36.181818 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ftp/TestFTPContractCreate.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.ftp;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestFTPContractCreate extends AbstractContractCreateTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new FTPContract(conf);
}
}
| 1,226 | 36.181818 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ftp/FTPContract.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.ftp;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
import org.junit.Assert;
import java.net.URI;
import static org.junit.Assert.assertNotNull;
/**
 * The contract of FTP; requires the option "test.ftp.testdir" to be set.
*/
public class FTPContract extends AbstractBondedFSContract {
public static final String CONTRACT_XML = "contract/ftp.xml";
  /**
   * Name of the option defining the test directory on the FTP server.
   */
public static final String TEST_FS_TESTDIR = "test.ftp.testdir";
private String fsName;
private URI fsURI;
private FileSystem fs;
public FTPContract(Configuration conf) {
super(conf);
//insert the base features
addConfResource(CONTRACT_XML);
}
@Override
public String getScheme() {
return "ftp";
}
@Override
public Path getTestPath() {
String pathString = getOption(TEST_FS_TESTDIR, null);
assertNotNull("Undefined test option " + TEST_FS_TESTDIR, pathString);
Path path = new Path(pathString);
return path;
}
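  /*
   * A usage sketch, not part of the contract: binding the FTP contract to a
   * server purely from code. Normally these options live in the per-user
   * contract-test-options.xml instead; both the FS binding key and the
   * values below are assumptions for illustration.
   */
  public static Configuration exampleBinding() {
    Configuration conf = new Configuration();
    //hypothetical binding key of the form "fs.contract.test.fs.<scheme>"
    conf.set("fs.contract.test.fs.ftp", "ftp://testuser:secret@localhost/");
    //hypothetical writable directory on the target FTP server
    conf.set(TEST_FS_TESTDIR, "/home/testuser/contract-tests");
    return conf;
  }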
}
| 1,931 | 29.1875 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ftp/TestFTPContractMkdir.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.ftp;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
/**
 * Test dir operations on the FTP filesystem.
*/
public class TestFTPContractMkdir extends AbstractContractMkdirTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new FTPContract(conf);
}
}
| 1,272 | 35.371429 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ftp/TestFTPContractOpen.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.ftp;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestFTPContractOpen extends AbstractContractOpenTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new FTPContract(conf);
}
}
| 1,220 | 36 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractMkdir.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.rawlocal;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
/**
 * Test dir operations on the raw local FS.
*/
public class TestRawlocalContractMkdir extends AbstractContractMkdirTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new RawlocalFSContract(conf);
}
}
| 1,289 | 35.857143 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractOpen.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.rawlocal;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestRawlocalContractOpen extends AbstractContractOpenTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new RawlocalFSContract(conf);
}
}
| 1,236 | 37.65625 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractAppend.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.rawlocal;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestRawlocalContractAppend extends AbstractContractAppendTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new RawlocalFSContract(conf);
}
}
| 1,243 | 36.69697 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/RawlocalFSContract.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.rawlocal;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.contract.localfs.LocalFSContract;
import java.io.File;
import java.io.IOException;
/**
* Raw local filesystem. This is the inner OS-layer FS
* before checksumming is added around it.
*/
public class RawlocalFSContract extends LocalFSContract {
public RawlocalFSContract(Configuration conf) {
super(conf);
}
public static final String RAW_CONTRACT_XML = "contract/localfs.xml";
@Override
protected String getContractXml() {
return RAW_CONTRACT_XML;
}
@Override
protected FileSystem getLocalFS() throws IOException {
return FileSystem.getLocal(getConf()).getRawFileSystem();
}
public File getTestDirectory() {
return new File(getTestDataDir());
}
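  /*
   * Illustrative sketch of the layering described in the class comment:
   * FileSystem.getLocal() returns the checksummed LocalFileSystem wrapper,
   * and getRawFileSystem() unwraps it to the OS-layer RawLocalFileSystem
   * used by this contract. Method name and use are for illustration only.
   */
  public static void showLayering(Configuration conf) throws IOException {
    org.apache.hadoop.fs.LocalFileSystem checksummed =
        FileSystem.getLocal(conf);
    FileSystem raw = checksummed.getRawFileSystem();
    System.out.println("wrapper: " + checksummed.getClass().getSimpleName()
        + ", inner: " + raw.getClass().getSimpleName());
  }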
}
| 1,663 | 30.396226 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractDelete.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.rawlocal;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestRawlocalContractDelete extends AbstractContractDeleteTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new RawlocalFSContract(conf);
}
}
| 1,243 | 36.69697 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractRename.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.rawlocal;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.junit.Test;
public class TestRawlocalContractRename extends AbstractContractRenameTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new RawlocalFSContract(conf);
}
/**
   * Test the fallback rename code <code>handleEmptyDstDirectoryOnWindows()</code>
   * even on non-Windows platforms, where the normal <code>File.renameTo()</code>
   * is supposed to work well. This test has been added for HADOOP-9805.
*
* @see AbstractContractRenameTest#testRenameWithNonEmptySubDirPOSIX()
*/
@Test
public void testRenameWithNonEmptySubDirPOSIX() throws Throwable {
final Path renameTestDir = path("testRenameWithNonEmptySubDir");
final Path srcDir = new Path(renameTestDir, "src1");
final Path srcSubDir = new Path(srcDir, "sub");
final Path finalDir = new Path(renameTestDir, "dest");
FileSystem fs = getFileSystem();
ContractTestUtils.rm(fs, renameTestDir, true, false);
fs.mkdirs(srcDir);
fs.mkdirs(finalDir);
ContractTestUtils.writeTextFile(fs, new Path(srcDir, "source.txt"),
"this is the file in src dir", false);
ContractTestUtils.writeTextFile(fs, new Path(srcSubDir, "subfile.txt"),
"this is the file in src/sub dir", false);
ContractTestUtils.assertPathExists(fs, "not created in src dir",
new Path(srcDir, "source.txt"));
ContractTestUtils.assertPathExists(fs, "not created in src/sub dir",
new Path(srcSubDir, "subfile.txt"));
RawLocalFileSystem rlfs = (RawLocalFileSystem) fs;
rlfs.handleEmptyDstDirectoryOnWindows(srcDir, rlfs.pathToFile(srcDir),
finalDir, rlfs.pathToFile(finalDir));
// Accept only POSIX rename behavior in this test
ContractTestUtils.assertPathExists(fs, "not renamed into dest dir",
new Path(finalDir, "source.txt"));
ContractTestUtils.assertPathExists(fs, "not renamed into dest/sub dir",
new Path(finalDir, "sub/subfile.txt"));
ContractTestUtils.assertPathDoesNotExist(fs, "not deleted",
new Path(srcDir, "source.txt"));
}
}
| 3,296 | 40.2125 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractSeek.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.rawlocal;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestRawlocalContractSeek extends AbstractContractSeekTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new RawlocalFSContract(conf);
}
}
| 1,236 | 37.65625 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractCreate.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.rawlocal;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestRawlocalContractCreate extends AbstractContractCreateTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new RawlocalFSContract(conf);
}
}
| 1,243 | 36.69697 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawLocalContractUnderlyingFileBehavior.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.rawlocal;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.File;
public class TestRawLocalContractUnderlyingFileBehavior extends Assert {
private static File testDirectory;
@BeforeClass
public static void before() {
RawlocalFSContract contract =
new RawlocalFSContract(new Configuration());
testDirectory = contract.getTestDirectory();
testDirectory.mkdirs();
assertTrue(testDirectory.isDirectory());
}
@Test
public void testDeleteEmptyPath() throws Throwable {
File nonexistent = new File(testDirectory, "testDeleteEmptyPath");
assertFalse(nonexistent.exists());
assertFalse("nonexistent.delete() returned true", nonexistent.delete());
}
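  /*
   * A further sketch of the same underlying java.io.File semantics; this
   * test is an illustrative addition, not part of the original suite:
   * deleting a non-empty directory also returns false rather than throwing.
   */
  @Test
  public void testDeleteNonEmptyDirSketch() throws Throwable {
    File dir = new File(testDirectory, "testDeleteNonEmptyDirSketch");
    assertTrue("mkdirs failed", dir.mkdirs() || dir.isDirectory());
    File child = new File(dir, "child.txt");
    assertTrue("file creation failed", child.createNewFile() || child.exists());
    assertFalse("delete() of a non-empty directory returned true",
        dir.delete());
  }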
}
| 1,686 | 32.74 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractOpen.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.localfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestLocalFSContractOpen extends AbstractContractOpenTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new LocalFSContract(conf);
}
}
| 1,231 | 37.5 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractAppend.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.localfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestLocalFSContractAppend extends AbstractContractAppendTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new LocalFSContract(conf);
}
}
| 1,238 | 36.545455 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractLoaded.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.localfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.AbstractFSContractTestBase;
import org.junit.Test;
import java.net.URL;
/**
 * Just here to make sure that the localfs.xml contract resource is actually loading.
*/
public class TestLocalFSContractLoaded extends AbstractFSContractTestBase {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new LocalFSContract(conf);
}
@Test
public void testContractWorks() throws Throwable {
String key = getContract().getConfKey(SUPPORTS_ATOMIC_RENAME);
assertNotNull("not set: " + key, getContract().getConf().get(key));
assertTrue("not true: " + key,
getContract().isSupported(SUPPORTS_ATOMIC_RENAME, false));
}
@Test
public void testContractResourceOnClasspath() throws Throwable {
URL url = this.getClass()
.getClassLoader()
.getResource(LocalFSContract.CONTRACT_XML);
assertNotNull("could not find contract resource", url);
}
}
| 1,955 | 35.90566 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractRename.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.localfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestLocalFSContractRename extends AbstractContractRenameTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new LocalFSContract(conf);
}
}
| 1,238 | 36.545455 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/LocalFSContract.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.localfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.ContractOptions;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.util.Shell;
import java.io.IOException;
/**
* The contract of the Local filesystem.
 * This changes its feature set from platform to platform -the default
* set is updated during initialization.
*
* This contract contains some override points, to permit
* the raw local filesystem and other filesystems to subclass it.
*/
public class LocalFSContract extends AbstractFSContract {
public static final String CONTRACT_XML = "contract/localfs.xml";
public static final String SYSPROP_TEST_BUILD_DATA = "test.build.data";
public static final String DEFAULT_TEST_BUILD_DATA_DIR = "test/build/data";
private FileSystem fs;
public LocalFSContract(Configuration conf) {
super(conf);
//insert the base features
addConfResource(getContractXml());
}
/**
* Return the contract file for this filesystem
* @return the XML
*/
protected String getContractXml() {
return CONTRACT_XML;
}
@Override
public void init() throws IOException {
super.init();
fs = getLocalFS();
adjustContractToLocalEnvironment();
}
/**
   * Tweak some of the contract parameters based on the local system
   * state.
*/
protected void adjustContractToLocalEnvironment() {
if (Shell.WINDOWS) {
//NTFS doesn't do case sensitivity, and its permissions are ACL-based
getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE), false);
getConf().setBoolean(getConfKey(ContractOptions.SUPPORTS_UNIX_PERMISSIONS), false);
} else if (ContractTestUtils.isOSX()) {
//OSX HFS+ is not case sensitive
getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE),
false);
}
}
/**
* Get the local filesystem. This may be overridden
* @return the filesystem
* @throws IOException
*/
protected FileSystem getLocalFS() throws IOException {
return FileSystem.getLocal(getConf());
}
@Override
public FileSystem getTestFileSystem() throws IOException {
return fs;
}
@Override
public String getScheme() {
return "file";
}
@Override
public Path getTestPath() {
Path path = fs.makeQualified(new Path(
getTestDataDir()));
return path;
}
/**
* Get the test data directory
* @return the directory for test data
*/
protected String getTestDataDir() {
return System.getProperty(SYSPROP_TEST_BUILD_DATA, DEFAULT_TEST_BUILD_DATA_DIR);
}
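  /*
   * Usage sketch (an illustration, not part of the contract): the test data
   * directory can be redirected before the contract is instantiated, e.g.
   * from a test harness. The example path is hypothetical.
   */
  public static void pointTestDataAt(String dir) {
    //e.g. pointTestDataAt("/tmp/my-contract-tests");
    System.setProperty(SYSPROP_TEST_BUILD_DATA, dir);
  }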
}
| 3,600 | 29.777778 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractDelete.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.localfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestLocalFSContractDelete extends AbstractContractDeleteTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new LocalFSContract(conf);
}
}
| 1,238 | 36.545455 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractCreate.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.localfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestLocalFSContractCreate extends AbstractContractCreateTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new LocalFSContract(conf);
}
}
| 1,238 | 36.545455 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractSeek.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.localfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestLocalFSContractSeek extends AbstractContractSeekTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new LocalFSContract(conf);
}
}
| 1,231 | 37.5 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractMkdir.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.localfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
/**
 * Test dir operations on the local FS.
*/
public class TestLocalFSContractMkdir extends AbstractContractMkdirTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new LocalFSContract(conf);
}
}
| 1,284 | 35.714286 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestGetInstances.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.conf;
import java.util.List;
import junit.framework.TestCase;
public class TestGetInstances extends TestCase {
interface SampleInterface {}
interface ChildInterface extends SampleInterface {}
static class SampleClass implements SampleInterface {
SampleClass() {}
}
static class AnotherClass implements ChildInterface {
AnotherClass() {}
}
/**
* Makes sure <code>Configuration.getInstances()</code> returns
* instances of the required type.
*/
public void testGetInstances() throws Exception {
Configuration conf = new Configuration();
List<SampleInterface> classes =
conf.getInstances("no.such.property", SampleInterface.class);
assertTrue(classes.isEmpty());
conf.set("empty.property", "");
classes = conf.getInstances("empty.property", SampleInterface.class);
assertTrue(classes.isEmpty());
conf.setStrings("some.classes",
SampleClass.class.getName(), AnotherClass.class.getName());
classes = conf.getInstances("some.classes", SampleInterface.class);
assertEquals(2, classes.size());
try {
conf.setStrings("some.classes",
SampleClass.class.getName(), AnotherClass.class.getName(),
String.class.getName());
conf.getInstances("some.classes", SampleInterface.class);
fail("java.lang.String does not implement SampleInterface");
} catch (RuntimeException e) {}
try {
conf.setStrings("some.classes",
SampleClass.class.getName(), AnotherClass.class.getName(),
"no.such.Class");
conf.getInstances("some.classes", SampleInterface.class);
fail("no.such.Class does not exist");
} catch (RuntimeException e) {}
}
}
| 2,554 | 33.066667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.conf;
import java.io.BufferedWriter;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.io.StringWriter;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.regex.Pattern;
import static java.util.concurrent.TimeUnit.*;
import junit.framework.TestCase;
import static org.junit.Assert.assertArrayEquals;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
import static org.junit.Assert.fail;
import org.codehaus.jackson.map.ObjectMapper;
public class TestConfiguration extends TestCase {
private Configuration conf;
final static String CONFIG = new File("./test-config-TestConfiguration.xml").getAbsolutePath();
final static String CONFIG2 = new File("./test-config2-TestConfiguration.xml").getAbsolutePath();
final static String CONFIG_FOR_ENUM = new File("./test-config-enum-TestConfiguration.xml").getAbsolutePath();
private static final String CONFIG_MULTI_BYTE = new File(
"./test-config-multi-byte-TestConfiguration.xml").getAbsolutePath();
private static final String CONFIG_MULTI_BYTE_SAVED = new File(
"./test-config-multi-byte-saved-TestConfiguration.xml").getAbsolutePath();
final static Random RAN = new Random();
final static String XMLHEADER =
IBM_JAVA?"<?xml version=\"1.0\" encoding=\"UTF-8\"?><configuration>":
"<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?><configuration>";
@Override
protected void setUp() throws Exception {
super.setUp();
conf = new Configuration();
}
@Override
protected void tearDown() throws Exception {
super.tearDown();
new File(CONFIG).delete();
new File(CONFIG2).delete();
new File(CONFIG_FOR_ENUM).delete();
new File(CONFIG_MULTI_BYTE).delete();
new File(CONFIG_MULTI_BYTE_SAVED).delete();
}
private void startConfig() throws IOException{
out.write("<?xml version=\"1.0\"?>\n");
out.write("<configuration>\n");
}
private void endConfig() throws IOException{
out.write("</configuration>\n");
out.close();
}
private void addInclude(String filename) throws IOException{
out.write("<xi:include href=\"" + filename + "\" xmlns:xi=\"http://www.w3.org/2001/XInclude\" />\n ");
}
public void testInputStreamResource() throws Exception {
StringWriter writer = new StringWriter();
out = new BufferedWriter(writer);
startConfig();
declareProperty("prop", "A", "A");
endConfig();
InputStream in1 = new ByteArrayInputStream(writer.toString().getBytes());
Configuration conf = new Configuration(false);
conf.addResource(in1);
assertEquals("A", conf.get("prop"));
InputStream in2 = new ByteArrayInputStream(writer.toString().getBytes());
conf.addResource(in2);
assertEquals("A", conf.get("prop"));
}
/**
* Tests use of multi-byte characters in property names and values. This test
* round-trips multi-byte string literals through saving and loading of config
* and asserts that the same values were read.
*/
public void testMultiByteCharacters() throws IOException {
String priorDefaultEncoding = System.getProperty("file.encoding");
try {
System.setProperty("file.encoding", "US-ASCII");
String name = "multi_byte_\u611b_name";
String value = "multi_byte_\u0641_value";
out = new BufferedWriter(new OutputStreamWriter(
new FileOutputStream(CONFIG_MULTI_BYTE), "UTF-8"));
startConfig();
declareProperty(name, value, value);
endConfig();
Configuration conf = new Configuration(false);
conf.addResource(new Path(CONFIG_MULTI_BYTE));
assertEquals(value, conf.get(name));
FileOutputStream fos = new FileOutputStream(CONFIG_MULTI_BYTE_SAVED);
try {
conf.writeXml(fos);
} finally {
IOUtils.closeStream(fos);
}
conf = new Configuration(false);
conf.addResource(new Path(CONFIG_MULTI_BYTE_SAVED));
assertEquals(value, conf.get(name));
} finally {
System.setProperty("file.encoding", priorDefaultEncoding);
}
}
public void testVariableSubstitution() throws IOException {
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
declareProperty("my.int", "${intvar}", "42");
declareProperty("intvar", "42", "42");
declareProperty("my.base", "/tmp/${user.name}", UNSPEC);
declareProperty("my.file", "hello", "hello");
declareProperty("my.suffix", ".txt", ".txt");
declareProperty("my.relfile", "${my.file}${my.suffix}", "hello.txt");
declareProperty("my.fullfile", "${my.base}/${my.file}${my.suffix}", UNSPEC);
// check that undefined variables are returned as-is
declareProperty("my.failsexpand", "a${my.undefvar}b", "a${my.undefvar}b");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
for (Prop p : props) {
System.out.println("p=" + p.name);
String gotVal = conf.get(p.name);
String gotRawVal = conf.getRaw(p.name);
assertEq(p.val, gotRawVal);
if (p.expectEval == UNSPEC) {
// expansion is system-dependent (uses System properties)
// can't do exact match so just check that all variables got expanded
assertTrue(gotVal != null && -1 == gotVal.indexOf("${"));
} else {
assertEq(p.expectEval, gotVal);
}
}
// check that expansion also occurs for getInt()
assertTrue(conf.getInt("intvar", -1) == 42);
assertTrue(conf.getInt("my.int", -1) == 42);
Map<String, String> results = conf.getValByRegex("^my.*file$");
assertTrue(results.keySet().contains("my.relfile"));
assertTrue(results.keySet().contains("my.fullfile"));
assertTrue(results.keySet().contains("my.file"));
assertEquals(-1, results.get("my.relfile").indexOf("${"));
assertEquals(-1, results.get("my.fullfile").indexOf("${"));
assertEquals(-1, results.get("my.file").indexOf("${"));
}
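  // Expansion sketch, as exercised above: get() resolves ${var} references
  // (against other configuration keys and Java system properties such as
  // ${user.name}), leaves unresolved references verbatim, and getRaw() skips
  // expansion entirely. Hypothetical example:
  //   conf.set("base", "/data");
  //   conf.set("dir", "${base}/logs");
  //   conf.get("dir");     // "/data/logs"
  //   conf.getRaw("dir");  // "${base}/logs"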
public void testFinalParam() throws IOException {
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
declareProperty("my.var", "", "", true);
endConfig();
Path fileResource = new Path(CONFIG);
Configuration conf1 = new Configuration();
conf1.addResource(fileResource);
assertNull("my var is not null", conf1.get("my.var"));
out=new BufferedWriter(new FileWriter(CONFIG2));
startConfig();
declareProperty("my.var", "myval", "myval", false);
endConfig();
fileResource = new Path(CONFIG2);
Configuration conf2 = new Configuration(conf1);
conf2.addResource(fileResource);
assertNull("my var is not final", conf2.get("my.var"));
}
public static void assertEq(Object a, Object b) {
System.out.println("assertEq: " + a + ", " + b);
assertEquals(a, b);
}
static class Prop {
String name;
String val;
String expectEval;
}
final String UNSPEC = null;
ArrayList<Prop> props = new ArrayList<Prop>();
void declareProperty(String name, String val, String expectEval)
throws IOException {
declareProperty(name, val, expectEval, false);
}
void declareProperty(String name, String val, String expectEval,
boolean isFinal)
throws IOException {
appendProperty(name, val, isFinal);
Prop p = new Prop();
p.name = name;
p.val = val;
p.expectEval = expectEval;
props.add(p);
}
void appendProperty(String name, String val) throws IOException {
appendProperty(name, val, false);
}
void appendProperty(String name, String val, boolean isFinal,
String ... sources)
throws IOException {
out.write("<property>");
out.write("<name>");
out.write(name);
out.write("</name>");
out.write("<value>");
out.write(val);
out.write("</value>");
if (isFinal) {
out.write("<final>true</final>");
}
for(String s : sources) {
out.write("<source>");
out.write(s);
out.write("</source>");
}
out.write("</property>\n");
}
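  // For one call such as appendProperty("a", "b", true, "src1") the helper
  // above writes a single line of the form:
  //   <property><name>a</name><value>b</value><final>true</final>
  //   <source>src1</source></property>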
public void testOverlay() throws IOException{
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("a","b");
appendProperty("b","c");
appendProperty("d","e");
appendProperty("e","f", true);
endConfig();
out=new BufferedWriter(new FileWriter(CONFIG2));
startConfig();
appendProperty("a","b");
appendProperty("b","d");
appendProperty("e","e");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
//set dynamically something
conf.set("c","d");
conf.set("a","d");
Configuration clone=new Configuration(conf);
clone.addResource(new Path(CONFIG2));
assertEquals(clone.get("a"), "d");
assertEquals(clone.get("b"), "d");
assertEquals(clone.get("c"), "d");
assertEquals(clone.get("d"), "e");
assertEquals(clone.get("e"), "f");
}
public void testCommentsInValue() throws IOException {
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("my.comment", "this <!--comment here--> contains a comment");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
//two spaces one after "this", one before "contains"
assertEquals("this contains a comment", conf.get("my.comment"));
}
public void testTrim() throws IOException {
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
String[] whitespaces = {"", " ", "\n", "\t"};
String[] name = new String[100];
for(int i = 0; i < name.length; i++) {
name[i] = "foo" + i;
StringBuilder prefix = new StringBuilder();
StringBuilder postfix = new StringBuilder();
for(int j = 0; j < 3; j++) {
prefix.append(whitespaces[RAN.nextInt(whitespaces.length)]);
postfix.append(whitespaces[RAN.nextInt(whitespaces.length)]);
}
appendProperty(prefix + name[i] + postfix, name[i] + ".value");
}
endConfig();
conf.addResource(new Path(CONFIG));
for(String n : name) {
assertEquals(n + ".value", conf.get(n));
}
}
public void testGetLocalPath() throws IOException {
Configuration conf = new Configuration();
String[] dirs = new String[]{"a", "b", "c"};
for (int i = 0; i < dirs.length; i++) {
dirs[i] = new Path(System.getProperty("test.build.data"), dirs[i])
.toString();
}
conf.set("dirs", StringUtils.join(dirs, ","));
for (int i = 0; i < 1000; i++) {
String localPath = conf.getLocalPath("dirs", "dir" + i).toString();
assertTrue("Path doesn't end in specified dir: " + localPath,
localPath.endsWith("dir" + i));
assertFalse("Path has internal whitespace: " + localPath,
localPath.contains(" "));
}
}
public void testGetFile() throws IOException {
Configuration conf = new Configuration();
String[] dirs = new String[]{"a", "b", "c"};
for (int i = 0; i < dirs.length; i++) {
dirs[i] = new Path(System.getProperty("test.build.data"), dirs[i])
.toString();
}
conf.set("dirs", StringUtils.join(dirs, ","));
for (int i = 0; i < 1000; i++) {
String localPath = conf.getFile("dirs", "dir" + i).toString();
assertTrue("Path doesn't end in specified dir: " + localPath,
localPath.endsWith("dir" + i));
assertFalse("Path has internal whitespace: " + localPath,
localPath.contains(" "));
}
}
public void testToString() throws IOException {
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
String expectedOutput =
"Configuration: core-default.xml, core-site.xml, " +
fileResource.toString();
assertEquals(expectedOutput, conf.toString());
}
public void testWriteXml() throws IOException {
Configuration conf = new Configuration();
ByteArrayOutputStream baos = new ByteArrayOutputStream();
conf.writeXml(baos);
String result = baos.toString();
assertTrue("Result has proper header", result.startsWith(XMLHEADER));
assertTrue("Result has proper footer", result.endsWith("</configuration>"));
}
public void testIncludes() throws Exception {
tearDown();
System.out.println("XXX testIncludes");
out=new BufferedWriter(new FileWriter(CONFIG2));
startConfig();
appendProperty("a","b");
appendProperty("c","d");
endConfig();
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
addInclude(CONFIG2);
appendProperty("e","f");
appendProperty("g","h");
endConfig();
// verify that the includes file contains all properties
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
assertEquals(conf.get("a"), "b");
assertEquals(conf.get("c"), "d");
assertEquals(conf.get("e"), "f");
assertEquals(conf.get("g"), "h");
tearDown();
}
public void testRelativeIncludes() throws Exception {
tearDown();
String relConfig = new File("./tmp/test-config.xml").getAbsolutePath();
String relConfig2 = new File("./tmp/test-config2.xml").getAbsolutePath();
new File(new File(relConfig).getParent()).mkdirs();
out = new BufferedWriter(new FileWriter(relConfig2));
startConfig();
appendProperty("a", "b");
endConfig();
out = new BufferedWriter(new FileWriter(relConfig));
startConfig();
// Add the relative path instead of the absolute one.
addInclude(new File(relConfig2).getName());
appendProperty("c", "d");
endConfig();
// verify that the includes file contains all properties
Path fileResource = new Path(relConfig);
conf.addResource(fileResource);
assertEquals(conf.get("a"), "b");
assertEquals(conf.get("c"), "d");
// Cleanup
new File(relConfig).delete();
new File(relConfig2).delete();
new File(new File(relConfig).getParent()).delete();
}
BufferedWriter out;
public void testIntegerRanges() {
Configuration conf = new Configuration();
conf.set("first", "-100");
conf.set("second", "4-6,9-10,27");
conf.set("third", "34-");
Configuration.IntegerRanges range = conf.getRange("first", null);
System.out.println("first = " + range);
assertEquals(true, range.isIncluded(0));
assertEquals(true, range.isIncluded(1));
assertEquals(true, range.isIncluded(100));
assertEquals(false, range.isIncluded(101));
range = conf.getRange("second", null);
System.out.println("second = " + range);
assertEquals(false, range.isIncluded(3));
assertEquals(true, range.isIncluded(4));
assertEquals(true, range.isIncluded(6));
assertEquals(false, range.isIncluded(7));
assertEquals(false, range.isIncluded(8));
assertEquals(true, range.isIncluded(9));
assertEquals(true, range.isIncluded(10));
assertEquals(false, range.isIncluded(11));
assertEquals(false, range.isIncluded(26));
assertEquals(true, range.isIncluded(27));
assertEquals(false, range.isIncluded(28));
range = conf.getRange("third", null);
System.out.println("third = " + range);
assertEquals(false, range.isIncluded(33));
assertEquals(true, range.isIncluded(34));
assertEquals(true, range.isIncluded(100000000));
}
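  // Range grammar sketch, as exercised above: a comma-separated list where
  // each item is a single number, "lo-hi", "-hi" (everything up to hi), or
  // "lo-" (lo and above); all bounds are inclusive.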
public void testGetRangeIterator() throws Exception {
Configuration config = new Configuration(false);
IntegerRanges ranges = config.getRange("Test", "");
assertFalse("Empty range has values", ranges.iterator().hasNext());
ranges = config.getRange("Test", "5");
Set<Integer> expected = new HashSet<Integer>(Arrays.asList(5));
Set<Integer> found = new HashSet<Integer>();
for(Integer i: ranges) {
found.add(i);
}
assertEquals(expected, found);
ranges = config.getRange("Test", "5-10,13-14");
expected = new HashSet<Integer>(Arrays.asList(5,6,7,8,9,10,13,14));
found = new HashSet<Integer>();
for(Integer i: ranges) {
found.add(i);
}
assertEquals(expected, found);
ranges = config.getRange("Test", "8-12, 5- 7");
expected = new HashSet<Integer>(Arrays.asList(5,6,7,8,9,10,11,12));
found = new HashSet<Integer>();
for(Integer i: ranges) {
found.add(i);
}
assertEquals(expected, found);
}
public void testHexValues() throws IOException{
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("test.hex1", "0x10");
appendProperty("test.hex2", "0xF");
appendProperty("test.hex3", "-0x10");
    // Invalid: trailing non-hex characters make the value unparseable.
appendProperty("test.hex4", "-0x10xyz");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
assertEquals(16, conf.getInt("test.hex1", 0));
assertEquals(16, conf.getLong("test.hex1", 0));
assertEquals(15, conf.getInt("test.hex2", 0));
assertEquals(15, conf.getLong("test.hex2", 0));
assertEquals(-16, conf.getInt("test.hex3", 0));
assertEquals(-16, conf.getLong("test.hex3", 0));
try {
conf.getLong("test.hex4", 0);
fail("Property had invalid long value, but was read successfully.");
} catch (NumberFormatException e) {
// pass
}
try {
conf.getInt("test.hex4", 0);
fail("Property had invalid int value, but was read successfully.");
} catch (NumberFormatException e) {
// pass
}
}
public void testIntegerValues() throws IOException{
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("test.int1", "20");
appendProperty("test.int2", "020");
appendProperty("test.int3", "-20");
appendProperty("test.int4", " -20 ");
appendProperty("test.int5", " -20xyz ");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
assertEquals(20, conf.getInt("test.int1", 0));
assertEquals(20, conf.getLong("test.int1", 0));
assertEquals(20, conf.getLongBytes("test.int1", 0));
assertEquals(20, conf.getInt("test.int2", 0));
assertEquals(20, conf.getLong("test.int2", 0));
assertEquals(20, conf.getLongBytes("test.int2", 0));
assertEquals(-20, conf.getInt("test.int3", 0));
assertEquals(-20, conf.getLong("test.int3", 0));
assertEquals(-20, conf.getLongBytes("test.int3", 0));
assertEquals(-20, conf.getInt("test.int4", 0));
assertEquals(-20, conf.getLong("test.int4", 0));
assertEquals(-20, conf.getLongBytes("test.int4", 0));
try {
conf.getInt("test.int5", 0);
fail("Property had invalid int value, but was read successfully.");
} catch (NumberFormatException e) {
// pass
}
}
public void testHumanReadableValues() throws IOException {
out = new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("test.humanReadableValue1", "1m");
appendProperty("test.humanReadableValue2", "1M");
appendProperty("test.humanReadableValue5", "1MBCDE");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
assertEquals(1048576, conf.getLongBytes("test.humanReadableValue1", 0));
assertEquals(1048576, conf.getLongBytes("test.humanReadableValue2", 0));
try {
conf.getLongBytes("test.humanReadableValue5", 0);
fail("Property had invalid human readable value, but was read successfully.");
} catch (NumberFormatException e) {
// pass
}
}
public void testBooleanValues() throws IOException {
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("test.bool1", "true");
appendProperty("test.bool2", "false");
appendProperty("test.bool3", " true ");
appendProperty("test.bool4", " false ");
appendProperty("test.bool5", "foo");
appendProperty("test.bool6", "TRUE");
appendProperty("test.bool7", "FALSE");
appendProperty("test.bool8", "");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
assertEquals(true, conf.getBoolean("test.bool1", false));
assertEquals(false, conf.getBoolean("test.bool2", true));
assertEquals(true, conf.getBoolean("test.bool3", false));
assertEquals(false, conf.getBoolean("test.bool4", true));
assertEquals(true, conf.getBoolean("test.bool5", true));
assertEquals(true, conf.getBoolean("test.bool6", false));
assertEquals(false, conf.getBoolean("test.bool7", true));
assertEquals(false, conf.getBoolean("test.bool8", false));
}
public void testFloatValues() throws IOException {
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("test.float1", "3.1415");
appendProperty("test.float2", "003.1415");
appendProperty("test.float3", "-3.1415");
appendProperty("test.float4", " -3.1415 ");
appendProperty("test.float5", "xyz-3.1415xyz");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
assertEquals(3.1415f, conf.getFloat("test.float1", 0.0f));
assertEquals(3.1415f, conf.getFloat("test.float2", 0.0f));
assertEquals(-3.1415f, conf.getFloat("test.float3", 0.0f));
assertEquals(-3.1415f, conf.getFloat("test.float4", 0.0f));
try {
conf.getFloat("test.float5", 0.0f);
fail("Property had invalid float value, but was read successfully.");
} catch (NumberFormatException e) {
// pass
}
}
public void testDoubleValues() throws IOException {
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("test.double1", "3.1415");
appendProperty("test.double2", "003.1415");
appendProperty("test.double3", "-3.1415");
appendProperty("test.double4", " -3.1415 ");
appendProperty("test.double5", "xyz-3.1415xyz");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
assertEquals(3.1415, conf.getDouble("test.double1", 0.0));
assertEquals(3.1415, conf.getDouble("test.double2", 0.0));
assertEquals(-3.1415, conf.getDouble("test.double3", 0.0));
assertEquals(-3.1415, conf.getDouble("test.double4", 0.0));
try {
conf.getDouble("test.double5", 0.0);
fail("Property had invalid double value, but was read successfully.");
} catch (NumberFormatException e) {
// pass
}
}
public void testGetClass() throws IOException {
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("test.class1", "java.lang.Integer");
appendProperty("test.class2", " java.lang.Integer ");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
assertEquals("java.lang.Integer", conf.getClass("test.class1", null).getCanonicalName());
assertEquals("java.lang.Integer", conf.getClass("test.class2", null).getCanonicalName());
}
public void testGetClasses() throws IOException {
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("test.classes1", "java.lang.Integer,java.lang.String");
appendProperty("test.classes2", " java.lang.Integer , java.lang.String ");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
String[] expectedNames = {"java.lang.Integer", "java.lang.String"};
Class<?>[] defaultClasses = {};
Class<?>[] classes1 = conf.getClasses("test.classes1", defaultClasses);
Class<?>[] classes2 = conf.getClasses("test.classes2", defaultClasses);
assertArrayEquals(expectedNames, extractClassNames(classes1));
assertArrayEquals(expectedNames, extractClassNames(classes2));
}
  public void testGetTrimmedStringCollection() throws IOException {
Configuration c = new Configuration();
c.set("x", " a, b\n,\nc ");
Collection<String> strs = c.getTrimmedStringCollection("x");
assertEquals(3, strs.size());
assertArrayEquals(new String[]{ "a", "b", "c" },
strs.toArray(new String[0]));
// Check that the result is mutable
strs.add("z");
// Make sure same is true for missing config
strs = c.getStringCollection("does-not-exist");
assertEquals(0, strs.size());
strs.add("z");
}
  public void testGetStringCollection() throws IOException {
Configuration c = new Configuration();
c.set("x", "a, b, c");
Collection<String> strs = c.getStringCollection("x");
assertEquals(3, strs.size());
assertArrayEquals(new String[]{ "a", " b", " c" },
strs.toArray(new String[0]));
// Check that the result is mutable
strs.add("z");
// Make sure same is true for missing config
strs = c.getStringCollection("does-not-exist");
assertEquals(0, strs.size());
strs.add("z");
}
private static String[] extractClassNames(Class<?>[] classes) {
String[] classNames = new String[classes.length];
for (int i = 0; i < classNames.length; i++) {
classNames[i] = classes[i].getCanonicalName();
}
return classNames;
}
enum Dingo { FOO, BAR };
enum Yak { RAB, FOO };
public void testEnum() throws IOException {
Configuration conf = new Configuration();
conf.setEnum("test.enum", Dingo.FOO);
assertSame(Dingo.FOO, conf.getEnum("test.enum", Dingo.BAR));
assertSame(Yak.FOO, conf.getEnum("test.enum", Yak.RAB));
conf.setEnum("test.enum", Dingo.FOO);
boolean fail = false;
try {
conf.setEnum("test.enum", Dingo.BAR);
Yak y = conf.getEnum("test.enum", Yak.FOO);
} catch (IllegalArgumentException e) {
fail = true;
}
assertTrue(fail);
}
public void testEnumFromXml() throws IOException {
out=new BufferedWriter(new FileWriter(CONFIG_FOR_ENUM));
startConfig();
appendProperty("test.enum"," \t \n FOO \t \n");
appendProperty("test.enum2"," \t \n Yak.FOO \t \n");
endConfig();
Configuration conf = new Configuration();
Path fileResource = new Path(CONFIG_FOR_ENUM);
conf.addResource(fileResource);
assertSame(Yak.FOO, conf.getEnum("test.enum", Yak.FOO));
boolean fail = false;
try {
conf.getEnum("test.enum2", Yak.FOO);
} catch (IllegalArgumentException e) {
fail = true;
}
assertTrue(fail);
}
public void testTimeDuration() {
Configuration conf = new Configuration(false);
conf.setTimeDuration("test.time.a", 7L, SECONDS);
assertEquals("7s", conf.get("test.time.a"));
assertEquals(0L, conf.getTimeDuration("test.time.a", 30, MINUTES));
assertEquals(7L, conf.getTimeDuration("test.time.a", 30, SECONDS));
assertEquals(7000L, conf.getTimeDuration("test.time.a", 30, MILLISECONDS));
assertEquals(7000000L,
conf.getTimeDuration("test.time.a", 30, MICROSECONDS));
assertEquals(7000000000L,
conf.getTimeDuration("test.time.a", 30, NANOSECONDS));
conf.setTimeDuration("test.time.b", 1, DAYS);
assertEquals("1d", conf.get("test.time.b"));
assertEquals(1, conf.getTimeDuration("test.time.b", 1, DAYS));
assertEquals(24, conf.getTimeDuration("test.time.b", 1, HOURS));
assertEquals(MINUTES.convert(1, DAYS),
conf.getTimeDuration("test.time.b", 1, MINUTES));
// check default
assertEquals(30L, conf.getTimeDuration("test.time.X", 30, SECONDS));
conf.set("test.time.X", "30");
assertEquals(30L, conf.getTimeDuration("test.time.X", 40, SECONDS));
for (Configuration.ParsedTimeDuration ptd :
Configuration.ParsedTimeDuration.values()) {
conf.setTimeDuration("test.time.unit", 1, ptd.unit());
assertEquals(1 + ptd.suffix(), conf.get("test.time.unit"));
assertEquals(1, conf.getTimeDuration("test.time.unit", 2, ptd.unit()));
}
}
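  // Duration sketch (hypothetical key): values carry a unit suffix and are
  // converted on read, truncating toward zero for coarser units:
  //   conf.setTimeDuration("x.interval", 90, SECONDS);  // stored as "90s"
  //   conf.getTimeDuration("x.interval", 0, MINUTES);   // 1 (not 1.5)
  // A bare number such as "30" is interpreted in the unit the getter names.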
public void testPattern() throws IOException {
out = new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("test.pattern1", "");
appendProperty("test.pattern2", "(");
appendProperty("test.pattern3", "a+b");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
Pattern defaultPattern = Pattern.compile("x+");
// Return default if missing
assertEquals(defaultPattern.pattern(),
conf.getPattern("xxxxx", defaultPattern).pattern());
// Return null if empty and default is null
assertNull(conf.getPattern("test.pattern1", null));
// Return default for empty
assertEquals(defaultPattern.pattern(),
conf.getPattern("test.pattern1", defaultPattern).pattern());
// Return default for malformed
assertEquals(defaultPattern.pattern(),
conf.getPattern("test.pattern2", defaultPattern).pattern());
// Works for correct patterns
assertEquals("a+b",
conf.getPattern("test.pattern3", defaultPattern).pattern());
}
public void testPropertySource() throws IOException {
out = new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("test.foo", "bar");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
conf.set("fs.defaultFS", "value");
String [] sources = conf.getPropertySources("test.foo");
assertEquals(1, sources.length);
assertEquals(
"Resource string returned for a file-loaded property" +
" must be a proper absolute path",
fileResource,
new Path(sources[0]));
assertArrayEquals("Resource string returned for a set() property must be " +
"\"programatically\"",
new String[]{"programatically"},
conf.getPropertySources("fs.defaultFS"));
assertEquals("Resource string returned for an unset property must be null",
null, conf.getPropertySources("fs.defaultFoo"));
}
public void testMultiplePropertySource() throws IOException {
out = new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("test.foo", "bar", false, "a", "b", "c");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
String [] sources = conf.getPropertySources("test.foo");
assertEquals(4, sources.length);
assertEquals("a", sources[0]);
assertEquals("b", sources[1]);
assertEquals("c", sources[2]);
assertEquals(
"Resource string returned for a file-loaded property" +
" must be a proper absolute path",
fileResource,
new Path(sources[3]));
}
public void testSocketAddress() throws IOException {
Configuration conf = new Configuration();
final String defaultAddr = "host:1";
final int defaultPort = 2;
InetSocketAddress addr = null;
addr = conf.getSocketAddr("myAddress", defaultAddr, defaultPort);
assertEquals(defaultAddr, NetUtils.getHostPortString(addr));
conf.set("myAddress", "host2");
addr = conf.getSocketAddr("myAddress", defaultAddr, defaultPort);
assertEquals("host2:"+defaultPort, NetUtils.getHostPortString(addr));
conf.set("myAddress", "host2:3");
addr = conf.getSocketAddr("myAddress", defaultAddr, defaultPort);
assertEquals("host2:3", NetUtils.getHostPortString(addr));
conf.set("myAddress", " \n \t host4:5 \t \n ");
addr = conf.getSocketAddr("myAddress", defaultAddr, defaultPort);
assertEquals("host4:5", NetUtils.getHostPortString(addr));
boolean threwException = false;
conf.set("myAddress", "bad:-port");
try {
addr = conf.getSocketAddr("myAddress", defaultAddr, defaultPort);
} catch (IllegalArgumentException iae) {
threwException = true;
assertEquals("Does not contain a valid host:port authority: " +
"bad:-port (configuration property 'myAddress')",
iae.getMessage());
} finally {
assertTrue(threwException);
}
}
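  // getSocketAddr sketch, per the assertions above: a bare "host" is completed
  // with the default port, "host:port" is used as given after trimming
  // whitespace, and anything that is not a valid host:port authority raises an
  // IllegalArgumentException naming the offending configuration property.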
public void testSetSocketAddress() throws IOException {
Configuration conf = new Configuration();
NetUtils.addStaticResolution("host", "127.0.0.1");
final String defaultAddr = "host:1";
InetSocketAddress addr = NetUtils.createSocketAddr(defaultAddr);
conf.setSocketAddr("myAddress", addr);
assertEquals(defaultAddr, NetUtils.getHostPortString(addr));
}
public void testUpdateSocketAddress() throws IOException {
InetSocketAddress addr = NetUtils.createSocketAddrForHost("host", 1);
InetSocketAddress connectAddr = conf.updateConnectAddr("myAddress", addr);
assertEquals(connectAddr.getHostName(), addr.getHostName());
addr = new InetSocketAddress(1);
connectAddr = conf.updateConnectAddr("myAddress", addr);
assertEquals(connectAddr.getHostName(),
InetAddress.getLocalHost().getHostName());
}
public void testReload() throws IOException {
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("test.key1", "final-value1", true);
appendProperty("test.key2", "value2");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
out=new BufferedWriter(new FileWriter(CONFIG2));
startConfig();
appendProperty("test.key1", "value1");
appendProperty("test.key3", "value3");
endConfig();
Path fileResource1 = new Path(CONFIG2);
conf.addResource(fileResource1);
// add a few values via set.
conf.set("test.key3", "value4");
conf.set("test.key4", "value5");
assertEquals("final-value1", conf.get("test.key1"));
assertEquals("value2", conf.get("test.key2"));
assertEquals("value4", conf.get("test.key3"));
assertEquals("value5", conf.get("test.key4"));
// change values in the test file...
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("test.key1", "final-value1");
appendProperty("test.key3", "final-value3", true);
endConfig();
conf.reloadConfiguration();
assertEquals("value1", conf.get("test.key1"));
// overlayed property overrides.
assertEquals("value4", conf.get("test.key3"));
assertEquals(null, conf.get("test.key2"));
assertEquals("value5", conf.get("test.key4"));
}
public void testSize() throws IOException {
Configuration conf = new Configuration(false);
conf.set("a", "A");
conf.set("b", "B");
assertEquals(2, conf.size());
}
public void testClear() throws IOException {
Configuration conf = new Configuration(false);
conf.set("a", "A");
conf.set("b", "B");
conf.clear();
assertEquals(0, conf.size());
assertFalse(conf.iterator().hasNext());
}
public static class Fake_ClassLoader extends ClassLoader {
}
public void testClassLoader() {
Configuration conf = new Configuration(false);
conf.setQuietMode(false);
conf.setClassLoader(new Fake_ClassLoader());
Configuration other = new Configuration(conf);
assertTrue(other.getClassLoader() instanceof Fake_ClassLoader);
}
static class JsonConfiguration {
JsonProperty[] properties;
public JsonProperty[] getProperties() {
return properties;
}
public void setProperties(JsonProperty[] properties) {
this.properties = properties;
}
}
static class JsonProperty {
String key;
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
public boolean getIsFinal() {
return isFinal;
}
public void setIsFinal(boolean isFinal) {
this.isFinal = isFinal;
}
public String getResource() {
return resource;
}
public void setResource(String resource) {
this.resource = resource;
}
String value;
boolean isFinal;
String resource;
}
public void testGetSetTrimmedNames() throws IOException {
Configuration conf = new Configuration(false);
conf.set(" name", "value");
assertEquals("value", conf.get("name"));
assertEquals("value", conf.get(" name"));
assertEquals("value", conf.getRaw(" name "));
}
  public void testDumpConfiguration() throws IOException {
StringWriter outWriter = new StringWriter();
Configuration.dumpConfiguration(conf, outWriter);
String jsonStr = outWriter.toString();
ObjectMapper mapper = new ObjectMapper();
JsonConfiguration jconf =
mapper.readValue(jsonStr, JsonConfiguration.class);
int defaultLength = jconf.getProperties().length;
// add 3 keys to the existing configuration properties
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("test.key1", "value1");
appendProperty("test.key2", "value2",true);
appendProperty("test.key3", "value3");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
out.close();
outWriter = new StringWriter();
Configuration.dumpConfiguration(conf, outWriter);
jsonStr = outWriter.toString();
mapper = new ObjectMapper();
jconf = mapper.readValue(jsonStr, JsonConfiguration.class);
int length = jconf.getProperties().length;
// check for consistency in the number of properties parsed in Json format.
    assertEquals(defaultLength + 3, length);
//change few keys in another resource file
out=new BufferedWriter(new FileWriter(CONFIG2));
startConfig();
appendProperty("test.key1", "newValue1");
appendProperty("test.key2", "newValue2");
endConfig();
Path fileResource1 = new Path(CONFIG2);
conf.addResource(fileResource1);
out.close();
outWriter = new StringWriter();
Configuration.dumpConfiguration(conf, outWriter);
jsonStr = outWriter.toString();
mapper = new ObjectMapper();
jconf = mapper.readValue(jsonStr, JsonConfiguration.class);
// put the keys and their corresponding attributes into a hashmap for their
// efficient retrieval
HashMap<String,JsonProperty> confDump = new HashMap<String,JsonProperty>();
for(JsonProperty prop : jconf.getProperties()) {
confDump.put(prop.getKey(), prop);
}
// check if the value and resource of test.key1 is changed
assertEquals("newValue1", confDump.get("test.key1").getValue());
assertEquals(false, confDump.get("test.key1").getIsFinal());
assertEquals(fileResource1.toString(),
confDump.get("test.key1").getResource());
// check if final parameter test.key2 is not changed, since it is first
// loaded as final parameter
assertEquals("value2", confDump.get("test.key2").getValue());
assertEquals(true, confDump.get("test.key2").getIsFinal());
assertEquals(fileResource.toString(),
confDump.get("test.key2").getResource());
// check for other keys which are not modified later
assertEquals("value3", confDump.get("test.key3").getValue());
assertEquals(false, confDump.get("test.key3").getIsFinal());
assertEquals(fileResource.toString(),
confDump.get("test.key3").getResource());
// check for resource to be "Unknown" for keys which are loaded using 'set'
// and expansion of properties
conf.set("test.key4", "value4");
conf.set("test.key5", "value5");
conf.set("test.key6", "${test.key5}");
outWriter = new StringWriter();
Configuration.dumpConfiguration(conf, outWriter);
jsonStr = outWriter.toString();
mapper = new ObjectMapper();
jconf = mapper.readValue(jsonStr, JsonConfiguration.class);
confDump = new HashMap<String, JsonProperty>();
for(JsonProperty prop : jconf.getProperties()) {
confDump.put(prop.getKey(), prop);
}
assertEquals("value5",confDump.get("test.key6").getValue());
assertEquals("programatically", confDump.get("test.key4").getResource());
outWriter.close();
}
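  // The JSON emitted by dumpConfiguration is assumed to have the shape
  //   {"properties":[{"key":...,"value":...,"isFinal":...,"resource":...}]}
  // which is exactly what the JsonConfiguration/JsonProperty beans above
  // deserialize.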
  public void testDumpConfigurationWithoutDefaults() throws IOException {
// check for case when default resources are not loaded
Configuration config = new Configuration(false);
StringWriter outWriter = new StringWriter();
Configuration.dumpConfiguration(config, outWriter);
String jsonStr = outWriter.toString();
ObjectMapper mapper = new ObjectMapper();
JsonConfiguration jconf =
mapper.readValue(jsonStr, JsonConfiguration.class);
//ensure that no properties are loaded.
assertEquals(0, jconf.getProperties().length);
// add 2 keys
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("test.key1", "value1");
appendProperty("test.key2", "value2",true);
endConfig();
Path fileResource = new Path(CONFIG);
config.addResource(fileResource);
out.close();
outWriter = new StringWriter();
Configuration.dumpConfiguration(config, outWriter);
jsonStr = outWriter.toString();
mapper = new ObjectMapper();
jconf = mapper.readValue(jsonStr, JsonConfiguration.class);
HashMap<String, JsonProperty>confDump = new HashMap<String, JsonProperty>();
for (JsonProperty prop : jconf.getProperties()) {
confDump.put(prop.getKey(), prop);
}
//ensure only 2 keys are loaded
    assertEquals(2, jconf.getProperties().length);
    //ensure the values are consistent
    assertEquals("value1", confDump.get("test.key1").getValue());
    assertEquals("value2", confDump.get("test.key2").getValue());
//check the final tag
assertEquals(false, confDump.get("test.key1").getIsFinal());
assertEquals(true, confDump.get("test.key2").getIsFinal());
//check the resource for each property
for (JsonProperty prop : jconf.getProperties()) {
assertEquals(fileResource.toString(),prop.getResource());
}
}
public void testGetValByRegex() {
Configuration conf = new Configuration();
String key1 = "t.abc.key1";
String key2 = "t.abc.key2";
String key3 = "tt.abc.key3";
String key4 = "t.abc.ey3";
conf.set(key1, "value1");
conf.set(key2, "value2");
conf.set(key3, "value3");
conf.set(key4, "value3");
Map<String,String> res = conf.getValByRegex("^t\\..*\\.key\\d");
assertTrue("Conf didn't get key " + key1, res.containsKey(key1));
assertTrue("Conf didn't get key " + key2, res.containsKey(key2));
assertTrue("Picked out wrong key " + key3, !res.containsKey(key3));
assertTrue("Picked out wrong key " + key4, !res.containsKey(key4));
}
public void testSettingValueNull() throws Exception {
Configuration config = new Configuration();
try {
config.set("testClassName", null);
fail("Should throw an IllegalArgumentException exception ");
} catch (Exception e) {
assertTrue(e instanceof IllegalArgumentException);
assertEquals(e.getMessage(),
"The value of property testClassName must not be null");
}
}
public void testSettingKeyNull() throws Exception {
Configuration config = new Configuration();
try {
config.set(null, "test");
fail("Should throw an IllegalArgumentException exception ");
} catch (Exception e) {
assertTrue(e instanceof IllegalArgumentException);
assertEquals(e.getMessage(), "Property name must not be null");
}
}
  public void testInvalidSubstitution() {
final Configuration configuration = new Configuration(false);
// 2-var loops
//
final String key = "test.random.key";
for (String keyExpression : Arrays.asList(
"${" + key + "}",
"foo${" + key + "}",
"foo${" + key + "}bar",
"${" + key + "}bar")) {
configuration.set(key, keyExpression);
checkSubDepthException(configuration, key);
}
//
// 3-variable loops
//
final String expVal1 = "${test.var2}";
String testVar1 = "test.var1";
configuration.set(testVar1, expVal1);
configuration.set("test.var2", "${test.var3}");
configuration.set("test.var3", "${test.var1}");
checkSubDepthException(configuration, testVar1);
// 3-variable loop with non-empty value prefix/suffix
//
final String expVal2 = "foo2${test.var2}bar2";
configuration.set(testVar1, expVal2);
configuration.set("test.var2", "foo3${test.var3}bar3");
configuration.set("test.var3", "foo1${test.var1}bar1");
checkSubDepthException(configuration, testVar1);
}
private static void checkSubDepthException(Configuration configuration,
String key) {
try {
configuration.get(key);
fail("IllegalStateException depth too large not thrown");
} catch (IllegalStateException e) {
assertTrue("Unexpected exception text: " + e,
e.getMessage().contains("substitution depth"));
}
}
public void testIncompleteSubbing() {
Configuration configuration = new Configuration(false);
String key = "test.random.key";
for (String keyExpression : Arrays.asList(
"{}",
"${}",
"{" + key,
"${" + key,
"foo${" + key,
"foo${" + key + "bar",
"foo{" + key + "}bar",
"${" + key + "bar")) {
configuration.set(key, keyExpression);
String value = configuration.get(key);
assertTrue("Unexpected value " + value, value.equals(keyExpression));
}
}
public void testGetClassByNameOrNull() throws Exception {
Configuration config = new Configuration();
Class<?> clazz = config.getClassByNameOrNull("java.lang.Object");
assertNotNull(clazz);
}
public void testGetFinalParameters() throws Exception {
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
declareProperty("my.var", "x", "x", true);
endConfig();
Path fileResource = new Path(CONFIG);
Configuration conf = new Configuration();
Set<String> finalParameters = conf.getFinalParameters();
assertFalse("my.var already exists", finalParameters.contains("my.var"));
conf.addResource(fileResource);
assertEquals("my.var is undefined", "x", conf.get("my.var"));
assertFalse("finalparams not copied", finalParameters.contains("my.var"));
finalParameters = conf.getFinalParameters();
assertTrue("my.var is not final", finalParameters.contains("my.var"));
}
  /**
   * A test to check whether a thread can go into an infinite loop when the
   * backing map's data structure is corrupted by a concurrent resize. This
   * problem was reported in SPARK-2546.
   * @throws Exception
   */
public void testConcurrentAccesses() throws Exception {
out = new BufferedWriter(new FileWriter(CONFIG));
startConfig();
declareProperty("some.config", "xyz", "xyz", false);
endConfig();
Path fileResource = new Path(CONFIG);
Configuration conf = new Configuration();
conf.addResource(fileResource);
class ConfigModifyThread extends Thread {
final private Configuration config;
final private String prefix;
public ConfigModifyThread(Configuration conf, String prefix) {
config = conf;
this.prefix = prefix;
}
@Override
public void run() {
for (int i = 0; i < 100000; i++) {
config.set("some.config.value-" + prefix + i, "value");
}
}
}
ArrayList<ConfigModifyThread> threads = new ArrayList<>();
for (int i = 0; i < 100; i++) {
threads.add(new ConfigModifyThread(conf, String.valueOf(i)));
}
for (Thread t: threads) {
t.start();
}
for (Thread t: threads) {
t.join();
}
    // If this test completes without hanging in an infinite loop, the
    // concurrent modifications are considered safe.
}
public void testNullValueProperties() throws Exception {
Configuration conf = new Configuration();
conf.setAllowNullValueProperties(true);
out = new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("attr", "value", true);
appendProperty("attr", "", true);
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
assertEquals("value", conf.get("attr"));
}
public static void main(String[] argv) throws Exception {
junit.textui.TestRunner.main(new String[]{
TestConfiguration.class.getName()
});
}
}
| 49,416 | 34.887436 | 111 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.conf;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange;
import org.junit.Test;
import org.junit.Before;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;
import java.io.IOException;
import java.util.Collection;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
public class TestReconfiguration {
private Configuration conf1;
private Configuration conf2;
private static final String PROP1 = "test.prop.one";
private static final String PROP2 = "test.prop.two";
private static final String PROP3 = "test.prop.three";
private static final String PROP4 = "test.prop.four";
private static final String PROP5 = "test.prop.five";
private static final String VAL1 = "val1";
private static final String VAL2 = "val2";
@Before
public void setUp () {
conf1 = new Configuration();
conf2 = new Configuration();
// set some test properties
conf1.set(PROP1, VAL1);
conf1.set(PROP2, VAL1);
conf1.set(PROP3, VAL1);
conf2.set(PROP1, VAL1); // same as conf1
    conf2.set(PROP2, VAL2); // different value from conf1
// PROP3 not set in conf2
conf2.set(PROP4, VAL1); // not set in conf1
}
/**
* Test ReconfigurationUtil.getChangedProperties.
*/
@Test
public void testGetChangedProperties() {
Collection<ReconfigurationUtil.PropertyChange> changes =
ReconfigurationUtil.getChangedProperties(conf2, conf1);
assertTrue("expected 3 changed properties but got " + changes.size(),
changes.size() == 3);
boolean changeFound = false;
boolean unsetFound = false;
boolean setFound = false;
for (ReconfigurationUtil.PropertyChange c: changes) {
if (c.prop.equals(PROP2) && c.oldVal != null && c.oldVal.equals(VAL1) &&
c.newVal != null && c.newVal.equals(VAL2)) {
changeFound = true;
} else if (c.prop.equals(PROP3) && c.oldVal != null && c.oldVal.equals(VAL1) &&
c.newVal == null) {
unsetFound = true;
} else if (c.prop.equals(PROP4) && c.oldVal == null &&
c.newVal != null && c.newVal.equals(VAL1)) {
setFound = true;
}
}
assertTrue("not all changes have been applied",
changeFound && unsetFound && setFound);
}
/**
* a simple reconfigurable class
*/
public static class ReconfigurableDummy extends ReconfigurableBase
implements Runnable {
public volatile boolean running = true;
public ReconfigurableDummy(Configuration conf) {
super(conf);
}
@Override
protected Configuration getNewConf() {
return new Configuration();
}
@Override
public Collection<String> getReconfigurableProperties() {
return Arrays.asList(PROP1, PROP2, PROP4);
}
@Override
public synchronized void reconfigurePropertyImpl(
String property, String newVal) throws ReconfigurationException {
// do nothing
}
/**
* Run until PROP1 is no longer VAL1.
*/
@Override
public void run() {
while (running && getConf().get(PROP1).equals(VAL1)) {
try {
Thread.sleep(1);
} catch (InterruptedException ignore) {
// do nothing
}
}
}
}
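  // A minimal Reconfigurable needs little more than the overrides above: a
  // whitelist from getReconfigurableProperties() and a
  // reconfigurePropertyImpl() that applies (or, here, ignores) the new value.
  // ReconfigurableBase is assumed to handle the validation and reject any
  // property outside the whitelist with a ReconfigurationException.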
/**
* Test reconfiguring a Reconfigurable.
*/
@Test
public void testReconfigure() {
ReconfigurableDummy dummy = new ReconfigurableDummy(conf1);
assertTrue(PROP1 + " set to wrong value ",
dummy.getConf().get(PROP1).equals(VAL1));
assertTrue(PROP2 + " set to wrong value ",
dummy.getConf().get(PROP2).equals(VAL1));
assertTrue(PROP3 + " set to wrong value ",
dummy.getConf().get(PROP3).equals(VAL1));
assertTrue(PROP4 + " set to wrong value ",
dummy.getConf().get(PROP4) == null);
assertTrue(PROP5 + " set to wrong value ",
dummy.getConf().get(PROP5) == null);
assertTrue(PROP1 + " should be reconfigurable ",
dummy.isPropertyReconfigurable(PROP1));
assertTrue(PROP2 + " should be reconfigurable ",
dummy.isPropertyReconfigurable(PROP2));
assertFalse(PROP3 + " should not be reconfigurable ",
dummy.isPropertyReconfigurable(PROP3));
assertTrue(PROP4 + " should be reconfigurable ",
dummy.isPropertyReconfigurable(PROP4));
assertFalse(PROP5 + " should not be reconfigurable ",
dummy.isPropertyReconfigurable(PROP5));
// change something to the same value as before
{
boolean exceptionCaught = false;
try {
dummy.reconfigureProperty(PROP1, VAL1);
assertTrue(PROP1 + " set to wrong value ",
dummy.getConf().get(PROP1).equals(VAL1));
} catch (ReconfigurationException e) {
exceptionCaught = true;
}
assertFalse("received unexpected exception",
exceptionCaught);
}
// change something to null
{
boolean exceptionCaught = false;
try {
dummy.reconfigureProperty(PROP1, null);
assertTrue(PROP1 + "set to wrong value ",
dummy.getConf().get(PROP1) == null);
} catch (ReconfigurationException e) {
exceptionCaught = true;
}
assertFalse("received unexpected exception",
exceptionCaught);
}
// change something to a different value than before
{
boolean exceptionCaught = false;
try {
dummy.reconfigureProperty(PROP1, VAL2);
assertTrue(PROP1 + "set to wrong value ",
dummy.getConf().get(PROP1).equals(VAL2));
} catch (ReconfigurationException e) {
exceptionCaught = true;
}
assertFalse("received unexpected exception",
exceptionCaught);
}
// set unset property to null
{
boolean exceptionCaught = false;
try {
dummy.reconfigureProperty(PROP4, null);
assertTrue(PROP4 + "set to wrong value ",
dummy.getConf().get(PROP4) == null);
} catch (ReconfigurationException e) {
exceptionCaught = true;
}
assertFalse("received unexpected exception",
exceptionCaught);
}
// set unset property
{
boolean exceptionCaught = false;
try {
dummy.reconfigureProperty(PROP4, VAL1);
assertTrue(PROP4 + "set to wrong value ",
dummy.getConf().get(PROP4).equals(VAL1));
} catch (ReconfigurationException e) {
exceptionCaught = true;
}
assertFalse("received unexpected exception",
exceptionCaught);
}
// try to set unset property to null (not reconfigurable)
{
boolean exceptionCaught = false;
try {
dummy.reconfigureProperty(PROP5, null);
} catch (ReconfigurationException e) {
exceptionCaught = true;
}
assertTrue("did not receive expected exception",
exceptionCaught);
}
// try to set unset property to value (not reconfigurable)
{
boolean exceptionCaught = false;
try {
dummy.reconfigureProperty(PROP5, VAL1);
} catch (ReconfigurationException e) {
exceptionCaught = true;
}
assertTrue("did not receive expected exception",
exceptionCaught);
}
// try to change property to value (not reconfigurable)
{
boolean exceptionCaught = false;
try {
dummy.reconfigureProperty(PROP3, VAL2);
} catch (ReconfigurationException e) {
exceptionCaught = true;
}
assertTrue("did not receive expected exception",
exceptionCaught);
}
// try to change property to null (not reconfigurable)
{
boolean exceptionCaught = false;
try {
dummy.reconfigureProperty(PROP3, null);
} catch (ReconfigurationException e) {
exceptionCaught = true;
}
assertTrue("did not receive expected exception",
exceptionCaught);
}
}
/**
* Test whether configuration changes are visible in another thread.
*/
@Test
public void testThread() throws ReconfigurationException {
ReconfigurableDummy dummy = new ReconfigurableDummy(conf1);
assertTrue(dummy.getConf().get(PROP1).equals(VAL1));
Thread dummyThread = new Thread(dummy);
dummyThread.start();
try {
Thread.sleep(500);
} catch (InterruptedException ignore) {
// do nothing
}
dummy.reconfigureProperty(PROP1, VAL2);
long endWait = Time.now() + 2000;
while (dummyThread.isAlive() && Time.now() < endWait) {
try {
Thread.sleep(50);
} catch (InterruptedException ignore) {
// do nothing
}
}
assertFalse("dummy thread should not be alive",
dummyThread.isAlive());
dummy.running = false;
try {
dummyThread.join();
} catch (InterruptedException ignore) {
// do nothing
}
assertTrue(PROP1 + " is set to wrong value",
dummy.getConf().get(PROP1).equals(VAL2));
}
private static class AsyncReconfigurableDummy extends ReconfigurableBase {
AsyncReconfigurableDummy(Configuration conf) {
super(conf);
}
@Override
protected Configuration getNewConf() {
return new Configuration();
}
final CountDownLatch latch = new CountDownLatch(1);
@Override
public Collection<String> getReconfigurableProperties() {
return Arrays.asList(PROP1, PROP2, PROP4);
}
@Override
public synchronized void reconfigurePropertyImpl(String property,
String newVal) throws ReconfigurationException {
try {
latch.await();
} catch (InterruptedException e) {
// Ignore
}
}
}
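  // Design note: reconfigurePropertyImpl blocks on the latch, so the async
  // reconfiguration task stays "running" until a test calls latch.countDown().
  // This is what lets testStartReconfigurationFailureDueToExistingRunningTask
  // observe an in-flight task deterministically.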
private static void waitAsyncReconfigureTaskFinish(ReconfigurableBase rb)
throws InterruptedException {
ReconfigurationTaskStatus status = null;
int count = 20;
while (count > 0) {
status = rb.getReconfigurationTaskStatus();
if (status.stopped()) {
break;
}
count--;
Thread.sleep(500);
}
    assertTrue("reconfiguration task did not stop in time", status.stopped());
}
@Test
public void testAsyncReconfigure()
throws ReconfigurationException, IOException, InterruptedException {
AsyncReconfigurableDummy dummy = spy(new AsyncReconfigurableDummy(conf1));
List<PropertyChange> changes = Lists.newArrayList();
changes.add(new PropertyChange("name1", "new1", "old1"));
changes.add(new PropertyChange("name2", "new2", "old2"));
changes.add(new PropertyChange("name3", "new3", "old3"));
doReturn(changes).when(dummy).getChangedProperties(
any(Configuration.class), any(Configuration.class));
doReturn(true).when(dummy).isPropertyReconfigurable(eq("name1"));
doReturn(false).when(dummy).isPropertyReconfigurable(eq("name2"));
doReturn(true).when(dummy).isPropertyReconfigurable(eq("name3"));
doNothing().when(dummy)
.reconfigurePropertyImpl(eq("name1"), anyString());
doNothing().when(dummy)
.reconfigurePropertyImpl(eq("name2"), anyString());
doThrow(new ReconfigurationException("NAME3", "NEW3", "OLD3",
new IOException("io exception")))
.when(dummy).reconfigurePropertyImpl(eq("name3"), anyString());
dummy.startReconfigurationTask();
waitAsyncReconfigureTaskFinish(dummy);
ReconfigurationTaskStatus status = dummy.getReconfigurationTaskStatus();
assertEquals(2, status.getStatus().size());
for (Map.Entry<PropertyChange, Optional<String>> result :
status.getStatus().entrySet()) {
PropertyChange change = result.getKey();
if (change.prop.equals("name1")) {
assertFalse(result.getValue().isPresent());
} else if (change.prop.equals("name2")) {
assertThat(result.getValue().get(),
containsString("Property name2 is not reconfigurable"));
} else if (change.prop.equals("name3")) {
assertThat(result.getValue().get(), containsString("io exception"));
} else {
fail("Unknown property: " + change.prop);
}
}
}
@Test(timeout=30000)
public void testStartReconfigurationFailureDueToExistingRunningTask()
throws InterruptedException, IOException {
AsyncReconfigurableDummy dummy = spy(new AsyncReconfigurableDummy(conf1));
List<PropertyChange> changes = Lists.newArrayList(
new PropertyChange(PROP1, "new1", "old1")
);
doReturn(changes).when(dummy).getChangedProperties(
any(Configuration.class), any(Configuration.class));
ReconfigurationTaskStatus status = dummy.getReconfigurationTaskStatus();
assertFalse(status.hasTask());
dummy.startReconfigurationTask();
status = dummy.getReconfigurationTaskStatus();
assertTrue(status.hasTask());
assertFalse(status.stopped());
// An active reconfiguration task is running.
try {
dummy.startReconfigurationTask();
fail("Expect to throw IOException.");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains(
"Another reconfiguration task is running", e);
}
status = dummy.getReconfigurationTaskStatus();
assertTrue(status.hasTask());
assertFalse(status.stopped());
dummy.latch.countDown();
waitAsyncReconfigureTaskFinish(dummy);
status = dummy.getReconfigurationTaskStatus();
assertTrue(status.hasTask());
assertTrue(status.stopped());
// The first task has finished.
dummy.startReconfigurationTask();
waitAsyncReconfigureTaskFinish(dummy);
ReconfigurationTaskStatus status2 = dummy.getReconfigurationTaskStatus();
assertTrue(status2.getStartTime() >= status.getEndTime());
dummy.shutdownReconfigurationTask();
try {
dummy.startReconfigurationTask();
fail("Expect to throw IOException");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("The server is stopped", e);
}
}
}
| 15,413 | 31.314465 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.conf;
import java.io.ByteArrayOutputStream;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.junit.Test;
import junit.framework.TestCase;
public class TestDeprecatedKeys extends TestCase {
//Tests a deprecated key
public void testDeprecatedKeys() throws Exception {
Configuration conf = new Configuration();
conf.set("topology.script.file.name", "xyz");
conf.set("topology.script.file.name", "xyz");
String scriptFile = conf.get(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY);
assertTrue(scriptFile.equals("xyz")) ;
}
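  // Deprecation sketch (hypothetical keys): once a mapping is registered,
  // reads and writes through either name stay in sync:
  //   Configuration.addDeprecation("old.key", new String[]{"new.key"});
  //   conf.set("old.key", "v");
  //   conf.get("new.key");  // "v"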
//Tests reading / writing a conf file with deprecation after setting
public void testReadWriteWithDeprecatedKeys() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean("old.config.yet.to.be.deprecated", true);
Configuration.addDeprecation("old.config.yet.to.be.deprecated",
new String[]{"new.conf.to.replace.deprecated.conf"});
ByteArrayOutputStream out=new ByteArrayOutputStream();
String fileContents;
try {
conf.writeXml(out);
fileContents = out.toString();
} finally {
out.close();
}
assertTrue(fileContents.contains("old.config.yet.to.be.deprecated"));
assertTrue(fileContents.contains("new.conf.to.replace.deprecated.conf"));
}
@Test
public void testIteratorWithDeprecatedKeysMappedToMultipleNewKeys() {
Configuration conf = new Configuration();
Configuration.addDeprecation("dK", new String[]{"nK1", "nK2"});
conf.set("k", "v");
conf.set("dK", "V");
assertEquals("V", conf.get("dK"));
assertEquals("V", conf.get("nK1"));
assertEquals("V", conf.get("nK2"));
conf.set("nK1", "VV");
assertEquals("VV", conf.get("dK"));
assertEquals("VV", conf.get("nK1"));
assertEquals("VV", conf.get("nK2"));
conf.set("nK2", "VVV");
assertEquals("VVV", conf.get("dK"));
assertEquals("VVV", conf.get("nK2"));
assertEquals("VVV", conf.get("nK1"));
boolean kFound = false;
boolean dKFound = false;
boolean nK1Found = false;
boolean nK2Found = false;
for (Map.Entry<String, String> entry : conf) {
if (entry.getKey().equals("k")) {
assertEquals("v", entry.getValue());
kFound = true;
}
if (entry.getKey().equals("dK")) {
assertEquals("VVV", entry.getValue());
dKFound = true;
}
if (entry.getKey().equals("nK1")) {
assertEquals("VVV", entry.getValue());
nK1Found = true;
}
if (entry.getKey().equals("nK2")) {
assertEquals("VVV", entry.getValue());
nK2Found = true;
}
}
assertTrue("regular Key not found", kFound);
assertTrue("deprecated Key not found", dKFound);
assertTrue("new Key 1 not found", nK1Found);
assertTrue("new Key 2 not found", nK2Found);
}
}
| 3,732 | 34.894231 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.conf;
import java.io.StringWriter;
import java.io.StringReader;
import java.util.Map;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.mortbay.util.ajax.JSON;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import junit.framework.TestCase;
import org.junit.Test;
/**
 * Basic test case verifying that the ConfServlet can write configuration
 * to its output in XML and JSON formats.
 */
public class TestConfServlet extends TestCase {
private static final String TEST_KEY = "testconfservlet.key";
private static final String TEST_VAL = "testval";
private Configuration getTestConf() {
Configuration testConf = new Configuration();
testConf.set(TEST_KEY, TEST_VAL);
return testConf;
}
@Test
@SuppressWarnings("unchecked")
public void testWriteJson() throws Exception {
StringWriter sw = new StringWriter();
ConfServlet.writeResponse(getTestConf(), sw, "json");
String json = sw.toString();
boolean foundSetting = false;
Object parsed = JSON.parse(json);
Object[] properties = ((Map<String, Object[]>)parsed).get("properties");
for (Object o : properties) {
Map<String, Object> propertyInfo = (Map<String, Object>)o;
String key = (String)propertyInfo.get("key");
String val = (String)propertyInfo.get("value");
String resource = (String)propertyInfo.get("resource");
System.err.println("k: " + key + " v: " + val + " r: " + resource);
if (TEST_KEY.equals(key) && TEST_VAL.equals(val)
&& "programatically".equals(resource)) {
foundSetting = true;
}
}
assertTrue(foundSetting);
}
@Test
public void testWriteXml() throws Exception {
StringWriter sw = new StringWriter();
ConfServlet.writeResponse(getTestConf(), sw, "xml");
String xml = sw.toString();
DocumentBuilderFactory docBuilderFactory
= DocumentBuilderFactory.newInstance();
DocumentBuilder builder = docBuilderFactory.newDocumentBuilder();
Document doc = builder.parse(new InputSource(new StringReader(xml)));
NodeList nameNodes = doc.getElementsByTagName("name");
boolean foundSetting = false;
for (int i = 0; i < nameNodes.getLength(); i++) {
Node nameNode = nameNodes.item(i);
String key = nameNode.getTextContent();
System.err.println("xml key: " + key);
if (TEST_KEY.equals(key)) {
foundSetting = true;
Element propertyElem = (Element)nameNode.getParentNode();
String val = propertyElem.getElementsByTagName("value").item(0).getTextContent();
assertEquals(TEST_VAL, val);
}
}
assertTrue(foundSetting);
}
@Test
public void testBadFormat() throws Exception {
StringWriter sw = new StringWriter();
try {
ConfServlet.writeResponse(getTestConf(), sw, "not a format");
fail("writeResponse with bad format didn't throw!");
} catch (ConfServlet.BadFormatException bfe) {
// expected
}
assertEquals("", sw.toString());
}
}
| 3,938 | 34.809091 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.conf;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.ByteArrayOutputStream;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.Assert;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration.DeprecationDelta;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.google.common.util.concurrent.Uninterruptibles;
public class TestConfigurationDeprecation {
private Configuration conf;
final static String CONFIG = new File("./test-config-TestConfigurationDeprecation.xml").getAbsolutePath();
final static String CONFIG2 = new File("./test-config2-TestConfigurationDeprecation.xml").getAbsolutePath();
final static String CONFIG3 = new File("./test-config3-TestConfigurationDeprecation.xml").getAbsolutePath();
BufferedWriter out;
static {
Configuration.addDefaultResource("test-fake-default.xml");
}
@Before
public void setUp() throws Exception {
conf = new Configuration(false);
}
@After
public void tearDown() throws Exception {
new File(CONFIG).delete();
new File(CONFIG2).delete();
new File(CONFIG3).delete();
}
private void startConfig() throws IOException{
out.write("<?xml version=\"1.0\"?>\n");
out.write("<configuration>\n");
}
private void endConfig() throws IOException{
out.write("</configuration>\n");
out.close();
}
void appendProperty(String name, String val) throws IOException {
appendProperty(name, val, false);
}
void appendProperty(String name, String val, boolean isFinal)
throws IOException {
out.write("<property>");
out.write("<name>");
out.write(name);
out.write("</name>");
out.write("<value>");
out.write(val);
out.write("</value>");
if (isFinal) {
out.write("<final>true</final>");
}
out.write("</property>\n");
}
private void addDeprecationToConfiguration() {
Configuration.addDeprecation("A", new String[]{"B"});
Configuration.addDeprecation("C", new String[]{"D"});
Configuration.addDeprecation("E", new String[]{"F"});
Configuration.addDeprecation("G", new String[]{"H"});
Configuration.addDeprecation("I", new String[]{"J"});
Configuration.addDeprecation("M", new String[]{"N"});
Configuration.addDeprecation("X", new String[]{"Y","Z"});
Configuration.addDeprecation("P", new String[]{"Q","R"});
}
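  // For reference, the mappings above mean that after
  // addDeprecation("A", new String[]{"B"}), get("A") and get("B") resolve to
  // the same value and setting either key updates both; "X" and "P" each fan
  // out to two new keys ("Y"/"Z" and "Q"/"R" respectively).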
/**
   * Checks that loading and setting properties behaves correctly in the
   * presence of deprecated keys.
* @throws IOException
*/
@Test
public void testDeprecation() throws IOException {
addDeprecationToConfiguration();
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
// load an old key and a new key.
appendProperty("A", "a");
appendProperty("D", "d");
// load an old key with multiple new-key mappings
appendProperty("P", "p");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
// check if loading of old key with multiple new-key mappings actually loads
// the corresponding new keys.
assertEquals("p", conf.get("P"));
assertEquals("p", conf.get("Q"));
assertEquals("p", conf.get("R"));
assertEquals("a", conf.get("A"));
assertEquals("a", conf.get("B"));
assertEquals("d", conf.get("C"));
assertEquals("d", conf.get("D"));
out=new BufferedWriter(new FileWriter(CONFIG2));
startConfig();
// load the old/new keys corresponding to the keys loaded before.
appendProperty("B", "b");
appendProperty("C", "c");
endConfig();
Path fileResource1 = new Path(CONFIG2);
conf.addResource(fileResource1);
assertEquals("b", conf.get("A"));
assertEquals("b", conf.get("B"));
assertEquals("c", conf.get("C"));
assertEquals("c", conf.get("D"));
// set new key
conf.set("N","n");
// get old key
assertEquals("n", conf.get("M"));
// check consistency in get of old and new keys
assertEquals(conf.get("M"), conf.get("N"));
// set old key and then get new key(s).
conf.set("M", "m");
assertEquals("m", conf.get("N"));
conf.set("X", "x");
assertEquals("x", conf.get("X"));
assertEquals("x", conf.get("Y"));
assertEquals("x", conf.get("Z"));
// set new keys to different values
conf.set("Y", "y");
conf.set("Z", "z");
// get old key
assertEquals("z", conf.get("X"));
}
/**
   * Ensures that deprecation-related keys are loaded correctly when they
   * are marked as final.
* @throws IOException
*/
@Test
public void testDeprecationForFinalParameters() throws IOException {
addDeprecationToConfiguration();
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
// set the following keys:
// 1.old key and final
// 2.new key whose corresponding old key is final
// 3.old key whose corresponding new key is final
// 4.new key and final
// 5.new key which is final and has null value.
appendProperty("A", "a", true);
appendProperty("D", "d");
appendProperty("E", "e");
appendProperty("H", "h", true);
appendProperty("J", "", true);
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
assertEquals("a", conf.get("A"));
assertEquals("a", conf.get("B"));
assertEquals("d", conf.get("C"));
assertEquals("d", conf.get("D"));
assertEquals("e", conf.get("E"));
assertEquals("e", conf.get("F"));
assertEquals("h", conf.get("G"));
assertEquals("h", conf.get("H"));
assertNull(conf.get("I"));
assertNull(conf.get("J"));
out=new BufferedWriter(new FileWriter(CONFIG2));
startConfig();
    // add the old/new counterparts of the keys added to CONFIG above
appendProperty("B", "b");
appendProperty("C", "c", true);
appendProperty("F", "f", true);
appendProperty("G", "g");
appendProperty("I", "i");
endConfig();
Path fileResource1 = new Path(CONFIG2);
conf.addResource(fileResource1);
assertEquals("a", conf.get("A"));
assertEquals("a", conf.get("B"));
assertEquals("c", conf.get("C"));
assertEquals("c", conf.get("D"));
assertEquals("f", conf.get("E"));
assertEquals("f", conf.get("F"));
assertEquals("h", conf.get("G"));
assertEquals("h", conf.get("H"));
assertNull(conf.get("I"));
assertNull(conf.get("J"));
out=new BufferedWriter(new FileWriter(CONFIG3));
startConfig();
// change the values of all the previously loaded
// keys (both deprecated and new)
appendProperty("A", "a1");
appendProperty("B", "b1");
appendProperty("C", "c1");
appendProperty("D", "d1");
appendProperty("E", "e1");
appendProperty("F", "f1");
appendProperty("G", "g1");
appendProperty("H", "h1");
appendProperty("I", "i1");
appendProperty("J", "j1");
endConfig();
fileResource = new Path(CONFIG);
conf.addResource(fileResource);
assertEquals("a", conf.get("A"));
assertEquals("a", conf.get("B"));
assertEquals("c", conf.get("C"));
assertEquals("c", conf.get("D"));
assertEquals("f", conf.get("E"));
assertEquals("f", conf.get("F"));
assertEquals("h", conf.get("G"));
assertEquals("h", conf.get("H"));
assertNull(conf.get("I"));
assertNull(conf.get("J"));
}
@Test
public void testSetBeforeAndGetAfterDeprecation() {
Configuration conf = new Configuration();
conf.set("oldkey", "hello");
Configuration.addDeprecation("oldkey", new String[]{"newkey"});
assertEquals("hello", conf.get("newkey"));
}
@Test
public void testSetBeforeAndGetAfterDeprecationAndDefaults() {
Configuration conf = new Configuration();
conf.set("tests.fake-default.old-key", "hello");
Configuration.addDeprecation("tests.fake-default.old-key",
new String[]{ "tests.fake-default.new-key" });
assertEquals("hello", conf.get("tests.fake-default.new-key"));
}
@Test
public void testIteratorWithDeprecatedKeys() {
Configuration conf = new Configuration();
Configuration.addDeprecation("dK", new String[]{"nK"});
conf.set("k", "v");
conf.set("dK", "V");
assertEquals("V", conf.get("dK"));
assertEquals("V", conf.get("nK"));
conf.set("nK", "VV");
assertEquals("VV", conf.get("dK"));
assertEquals("VV", conf.get("nK"));
boolean kFound = false;
boolean dKFound = false;
boolean nKFound = false;
for (Map.Entry<String, String> entry : conf) {
if (entry.getKey().equals("k")) {
assertEquals("v", entry.getValue());
kFound = true;
}
if (entry.getKey().equals("dK")) {
assertEquals("VV", entry.getValue());
dKFound = true;
}
if (entry.getKey().equals("nK")) {
assertEquals("VV", entry.getValue());
nKFound = true;
}
}
assertTrue("regular Key not found", kFound);
assertTrue("deprecated Key not found", dKFound);
assertTrue("new Key not found", nKFound);
}
@Test
public void testUnsetWithDeprecatedKeys() {
Configuration conf = new Configuration();
Configuration.addDeprecation("dK", new String[]{"nK"});
conf.set("nK", "VV");
assertEquals("VV", conf.get("dK"));
assertEquals("VV", conf.get("nK"));
conf.unset("dK");
assertNull(conf.get("dK"));
assertNull(conf.get("nK"));
conf.set("nK", "VV");
assertEquals("VV", conf.get("dK"));
assertEquals("VV", conf.get("nK"));
conf.unset("nK");
assertNull(conf.get("dK"));
assertNull(conf.get("nK"));
}
private static String getTestKeyName(int threadIndex, int testIndex) {
return "testConcurrentDeprecateAndManipulate.testKey." +
threadIndex + "." + testIndex;
}
/**
* Run a set of threads making changes to the deprecations
* concurrently with another set of threads calling get()
* and set() on Configuration objects.
*/
@SuppressWarnings("deprecation")
@Test(timeout=60000)
public void testConcurrentDeprecateAndManipulate() throws Exception {
final int NUM_THREAD_IDS = 10;
final int NUM_KEYS_PER_THREAD = 1000;
ScheduledThreadPoolExecutor executor =
new ScheduledThreadPoolExecutor(2 * NUM_THREAD_IDS,
new ThreadFactoryBuilder().setDaemon(true).
setNameFormat("testConcurrentDeprecateAndManipulate modification thread %d").
build());
final CountDownLatch latch = new CountDownLatch(1);
final AtomicInteger highestModificationThreadId = new AtomicInteger(1);
List<Future<Void>> futures = new LinkedList<Future<Void>>();
for (int i = 0; i < NUM_THREAD_IDS; i++) {
futures.add(executor.schedule(new Callable<Void>() {
@Override
public Void call() throws Exception {
latch.await();
int threadIndex = highestModificationThreadId.addAndGet(1);
for (int i = 0; i < NUM_KEYS_PER_THREAD; i++) {
String testKey = getTestKeyName(threadIndex, i);
String testNewKey = testKey + ".new";
Configuration.addDeprecations(
new DeprecationDelta[] {
new DeprecationDelta(testKey, testNewKey)
});
}
return null;
}
}, 0, TimeUnit.SECONDS));
}
final AtomicInteger highestAccessThreadId = new AtomicInteger(1);
for (int i = 0; i < NUM_THREAD_IDS; i++) {
futures.add(executor.schedule(new Callable<Void>() {
@Override
public Void call() throws Exception {
Configuration conf = new Configuration();
latch.await();
int threadIndex = highestAccessThreadId.addAndGet(1);
for (int i = 0; i < NUM_KEYS_PER_THREAD; i++) {
String testNewKey = getTestKeyName(threadIndex, i) + ".new";
String value = "value." + threadIndex + "." + i;
conf.set(testNewKey, value);
Assert.assertEquals(value, conf.get(testNewKey));
}
return null;
}
}, 0, TimeUnit.SECONDS));
}
latch.countDown(); // allow all threads to proceed
for (Future<Void> future : futures) {
Uninterruptibles.getUninterruptibly(future);
}
}
@Test
public void testNoFalseDeprecationWarning() throws IOException {
Configuration conf = new Configuration();
Configuration.addDeprecation("AA", "BB");
conf.set("BB", "bb");
conf.get("BB");
conf.writeXml(new ByteArrayOutputStream());
assertEquals(false, Configuration.hasWarnedDeprecation("AA"));
conf.set("AA", "aa");
assertEquals(true, Configuration.hasWarnedDeprecation("AA"));
}
@Test
public void testDeprecationSetUnset() throws IOException {
addDeprecationToConfiguration();
Configuration conf = new Configuration();
//"X" is deprecated by "Y" and "Z"
conf.set("Y", "y");
assertEquals("y", conf.get("Z"));
conf.set("X", "x");
assertEquals("x", conf.get("Z"));
conf.unset("Y");
assertEquals(null, conf.get("Z"));
assertEquals(null, conf.get("X"));
}
}
| 14,520 | 32.769767 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.conf;
import java.lang.Class;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.conf.Configuration;
/**
* Base class for comparing fields in one or more Configuration classes
* against a corresponding .xml file. Usage is intended as follows:
* <p></p>
* <ol>
* <li> Create a subclass to TestConfigurationFieldsBase
* <li> Define <code>initializeMemberVariables</code> method in the
* subclass. In this class, do the following:
* <p></p>
* <ol>
* <li> <b>Required</b> Set the variable <code>xmlFilename</code> to
* the appropriate xml definition file
* <li> <b>Required</b> Set the variable <code>configurationClasses</code>
* to an array of the classes which define the constants used by the
* code corresponding to the xml files
* <li> <b>Optional</b> Set <code>errorIfMissingConfigProps</code> if the
* subclass should throw an error in the method
* <code>testCompareXmlAgainstConfigurationClass</code>
* <li> <b>Optional</b> Set <code>errorIfMissingXmlProps</code> if the
* subclass should throw an error in the method
* <code>testCompareConfigurationClassAgainstXml</code>
* <li> <b>Optional</b> Instantiate and populate strings into one or
* more of the following variables:
* <br><code>configurationPropsToSkipCompare</code>
* <br><code>configurationPrefixToSkipCompare</code>
* <br><code>xmlPropsToSkipCompare</code>
* <br><code>xmlPrefixToSkipCompare</code>
* <br>
* in order to get comparisons clean
* </ol>
* </ol>
* <p></p>
 * The class-to-file and file-to-class comparison tests then run
 * automatically. This class (and its subclasses) is mostly not meant to be
 * overridden; it exists to do a very specific form of comparison testing.
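 * <p></p>
 * A minimal (hypothetical) subclass sketch; the class, file, and key names
 * below are illustrative only:
 * <pre>
 *   public class TestMyConfigurationFields extends TestConfigurationFieldsBase {
 *     public void initializeMemberVariables() {
 *       xmlFilename = "my-default.xml";                          // required
 *       configurationClasses = new Class[] { MyConfKeys.class }; // required
 *       errorIfMissingXmlProps = true;                           // optional
 *       xmlPropsToSkipCompare = new HashSet<String>();           // optional
 *       xmlPropsToSkipCompare.add("my.internal.only.property");
 *     }
 *   }
 * </pre>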
*/
@Ignore
public abstract class TestConfigurationFieldsBase {
/**
* Member variable for storing xml filename.
*/
protected String xmlFilename = null;
/**
* Member variable for storing all related Configuration classes.
*/
protected Class[] configurationClasses = null;
/**
* Throw error during comparison if missing configuration properties.
* Intended to be set by subclass.
*/
protected boolean errorIfMissingConfigProps = false;
/**
* Throw error during comparison if missing xml properties. Intended
* to be set by subclass.
*/
protected boolean errorIfMissingXmlProps = false;
/**
* Set of properties to skip extracting (and thus comparing later) in
* extractMemberVariablesFromConfigurationFields.
*/
protected Set<String> configurationPropsToSkipCompare = null;
/**
* Set of property prefixes to skip extracting (and thus comparing later)
   * in extractMemberVariablesFromConfigurationFields.
*/
protected Set<String> configurationPrefixToSkipCompare = null;
/**
* Set of properties to skip extracting (and thus comparing later) in
* extractPropertiesFromXml.
*/
protected Set<String> xmlPropsToSkipCompare = null;
/**
* Set of property prefixes to skip extracting (and thus comparing later)
* in extractPropertiesFromXml.
*/
protected Set<String> xmlPrefixToSkipCompare = null;
/**
* Member variable to store Configuration variables for later comparison.
*/
private Map<String,String> configurationMemberVariables = null;
/**
* Member variable to store XML properties for later comparison.
*/
private Map<String,String> xmlKeyValueMap = null;
/**
* Member variable to store Configuration variables that are not in the
* corresponding XML file.
*/
private Set<String> configurationFieldsMissingInXmlFile = null;
/**
* Member variable to store XML variables that are not in the
* corresponding Configuration class(es).
*/
private Set<String> xmlFieldsMissingInConfiguration = null;
/**
* Member variable for debugging base class operation
*/
protected boolean configDebug = false;
protected boolean xmlDebug = false;
/**
* Abstract method to be used by subclasses for initializing base
* members.
*/
public abstract void initializeMemberVariables();
/**
* Utility function to extract "public static final" member
* variables from a Configuration type class.
*
* @param fields The class member variables
* @return HashMap containing <StringValue,MemberVariableName> entries
*/
private HashMap<String,String>
extractMemberVariablesFromConfigurationFields(Field[] fields) {
// Sanity Check
if (fields==null)
return null;
HashMap<String,String> retVal = new HashMap<String,String>();
// Setup regexp for valid properties
String propRegex = "^[A-Za-z][A-Za-z0-9_-]+(\\.[A-Za-z0-9_-]+)+$";
Pattern p = Pattern.compile(propRegex);
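    // For example, "io.file.buffer.size" passes this filter, while
    // single-token values with no dot (e.g. "BUFFER_SIZE") do not.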
// Iterate through class member variables
int totalFields = 0;
String value;
for (Field f : fields) {
if (configDebug) {
System.out.println("Field: " + f);
}
// Filter out anything that isn't "public static final"
if (!Modifier.isStatic(f.getModifiers()) ||
!Modifier.isPublic(f.getModifiers()) ||
!Modifier.isFinal(f.getModifiers())) {
continue;
}
// Filter out anything that isn't a string. int/float are generally
// default values
if (!f.getType().getName().equals("java.lang.String")) {
continue;
}
// Convert found member into String
try {
value = (String) f.get(null);
} catch (IllegalAccessException iaException) {
continue;
}
if (configDebug) {
System.out.println(" Value: " + value);
}
      // Special case: detect and ignore partial properties (ending in
      // "." or "-") and file properties (ending in ".xml")
if (value.endsWith(".xml") ||
value.endsWith(".") ||
value.endsWith("-"))
continue;
// Ignore known configuration props
if (configurationPropsToSkipCompare != null) {
if (configurationPropsToSkipCompare.contains(value)) {
continue;
}
}
// Ignore known configuration prefixes
boolean skipPrefix = false;
if (configurationPrefixToSkipCompare != null) {
for (String cfgPrefix : configurationPrefixToSkipCompare) {
if (value.startsWith(cfgPrefix)) {
skipPrefix = true;
break;
}
}
}
if (skipPrefix) {
continue;
}
// Positive Filter: Look only for property values. Expect it to look
// something like: blah.blah2(.blah3.blah4...)
Matcher m = p.matcher(value);
if (!m.find()) {
if (configDebug) {
System.out.println(" Passes Regex: false");
}
continue;
}
if (configDebug) {
System.out.println(" Passes Regex: true");
}
// Save member variable/value as hash
if (!retVal.containsKey(value)) {
retVal.put(value,f.getName());
} else {
if (configDebug) {
System.out.println("ERROR: Already found key for property " + value);
}
}
}
return retVal;
}
/**
* Pull properties and values from filename.
*
* @param filename XML filename
* @return HashMap containing <Property,Value> entries from XML file
*/
  private HashMap<String,String> extractPropertiesFromXml(String filename) {
if (filename==null) {
return null;
}
// Iterate through XML file for name/value pairs
Configuration conf = new Configuration(false);
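    // permit <name>-only entries so keys declared without a <value> still
    // appear in the iteration below (see the onlyKeyExists branch)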
conf.setAllowNullValueProperties(true);
conf.addResource(filename);
HashMap<String,String> retVal = new HashMap<String,String>();
Iterator<Map.Entry<String,String>> kvItr = conf.iterator();
while (kvItr.hasNext()) {
Map.Entry<String,String> entry = kvItr.next();
String key = entry.getKey();
// Ignore known xml props
if (xmlPropsToSkipCompare != null) {
if (xmlPropsToSkipCompare.contains(key)) {
if (xmlDebug) {
System.out.println(" Skipping Full Key: " + key);
}
continue;
}
}
// Ignore known xml prefixes
boolean skipPrefix = false;
if (xmlPrefixToSkipCompare != null) {
for (String xmlPrefix : xmlPrefixToSkipCompare) {
if (key.startsWith(xmlPrefix)) {
skipPrefix = true;
break;
}
}
}
if (skipPrefix) {
if (xmlDebug) {
System.out.println(" Skipping Prefix Key: " + key);
}
continue;
}
if (conf.onlyKeyExists(key)) {
retVal.put(key,null);
if (xmlDebug) {
System.out.println(" XML Key,Null Value: " + key);
}
} else {
String value = conf.get(key);
if (value!=null) {
retVal.put(key,entry.getValue());
if (xmlDebug) {
System.out.println(" XML Key,Valid Value: " + key);
}
}
}
kvItr.remove();
}
return retVal;
}
/**
   * Compute the set difference keyMap1 - keyMap2 over the two key sets.
*
* @param keyMap1 The initial set
* @param keyMap2 The set to subtract
* @return Returns set operation keyMap1-keyMap2
*/
private static Set<String> compareConfigurationToXmlFields(Map<String,String> keyMap1, Map<String,String> keyMap2) {
Set<String> retVal = new HashSet<String>(keyMap1.keySet());
retVal.removeAll(keyMap2.keySet());
return retVal;
}
/**
* Initialize the four variables corresponding the Configuration
* class and the XML properties file.
*/
@Before
public void setupTestConfigurationFields() throws Exception {
initializeMemberVariables();
// Error if subclass hasn't set class members
assertTrue(xmlFilename!=null);
assertTrue(configurationClasses!=null);
// Create class member/value map
configurationMemberVariables = new HashMap<String,String>();
if (configDebug) {
System.out.println("Reading configuration classes");
System.out.println("");
}
for (Class c : configurationClasses) {
Field[] fields = c.getDeclaredFields();
Map<String,String> memberMap =
extractMemberVariablesFromConfigurationFields(fields);
if (memberMap!=null) {
configurationMemberVariables.putAll(memberMap);
}
}
if (configDebug) {
System.out.println("");
System.out.println("=====");
System.out.println("");
}
// Create XML key/value map
if (xmlDebug) {
System.out.println("Reading XML property files");
System.out.println("");
}
xmlKeyValueMap = extractPropertiesFromXml(xmlFilename);
if (xmlDebug) {
System.out.println("");
System.out.println("=====");
System.out.println("");
}
// Find class members not in the XML file
configurationFieldsMissingInXmlFile = compareConfigurationToXmlFields
(configurationMemberVariables, xmlKeyValueMap);
// Find XML properties not in the class
xmlFieldsMissingInConfiguration = compareConfigurationToXmlFields
(xmlKeyValueMap, configurationMemberVariables);
}
/**
* Compares the properties that are in the Configuration class, but not
* in the XML properties file.
*/
@Test
public void testCompareConfigurationClassAgainstXml() {
// Error if subclass hasn't set class members
assertTrue(xmlFilename!=null);
assertTrue(configurationClasses!=null);
final int missingXmlSize = configurationFieldsMissingInXmlFile.size();
for (Class c : configurationClasses) {
System.out.println(c);
}
System.out.println(" (" + configurationMemberVariables.size() + " member variables)");
System.out.println();
StringBuffer xmlErrorMsg = new StringBuffer();
for (Class c : configurationClasses) {
xmlErrorMsg.append(c);
xmlErrorMsg.append(" ");
}
xmlErrorMsg.append("has ");
xmlErrorMsg.append(missingXmlSize);
xmlErrorMsg.append(" variables missing in ");
xmlErrorMsg.append(xmlFilename);
System.out.println(xmlErrorMsg.toString());
System.out.println();
if (missingXmlSize==0) {
System.out.println(" (None)");
} else {
for (String missingField : configurationFieldsMissingInXmlFile) {
System.out.println(" " + missingField);
}
}
System.out.println();
System.out.println("=====");
System.out.println();
if (errorIfMissingXmlProps) {
assertTrue(xmlErrorMsg.toString(), missingXmlSize==0);
}
}
/**
* Compares the properties that are in the XML properties file, but not
* in the Configuration class.
*/
@Test
public void testCompareXmlAgainstConfigurationClass() {
// Error if subclass hasn't set class members
assertTrue(xmlFilename!=null);
assertTrue(configurationClasses!=null);
final int missingConfigSize = xmlFieldsMissingInConfiguration.size();
System.out.println("File " + xmlFilename + " (" + xmlKeyValueMap.size() + " properties)");
System.out.println();
StringBuffer configErrorMsg = new StringBuffer();
configErrorMsg.append(xmlFilename);
configErrorMsg.append(" has ");
configErrorMsg.append(missingConfigSize);
configErrorMsg.append(" properties missing in");
for (Class c : configurationClasses) {
configErrorMsg.append(" " + c);
}
System.out.println(configErrorMsg.toString());
System.out.println();
if (missingConfigSize==0) {
System.out.println(" (None)");
} else {
for (String missingField : xmlFieldsMissingInConfiguration) {
System.out.println(" " + missingField);
}
}
System.out.println();
System.out.println("=====");
System.out.println();
if ( errorIfMissingConfigProps ) {
assertTrue(configErrorMsg.toString(), missingConfigSize==0);
}
}
}
| 15,134 | 31.065678 | 118 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationSubclass.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.conf;
import junit.framework.TestCase;
import java.util.Properties;
/**
 * Tests that a Configuration subclass can reach protected members such as
 * getProps() and observe configuration reloads.
 *
 * Created 21-Jan-2009 13:42:36
*/
public class TestConfigurationSubclass extends TestCase {
private static final String EMPTY_CONFIGURATION_XML
= "/org/apache/hadoop/conf/empty-configuration.xml";
public void testGetProps() {
SubConf conf = new SubConf(true);
Properties properties = conf.getProperties();
assertNotNull("hadoop.tmp.dir is not set",
properties.getProperty("hadoop.tmp.dir"));
}
public void testReload() throws Throwable {
SubConf conf = new SubConf(true);
assertFalse(conf.isReloaded());
Configuration.addDefaultResource(EMPTY_CONFIGURATION_XML);
assertTrue(conf.isReloaded());
Properties properties = conf.getProperties();
}
public void testReloadNotQuiet() throws Throwable {
SubConf conf = new SubConf(true);
conf.setQuietMode(false);
assertFalse(conf.isReloaded());
conf.addResource("not-a-valid-resource");
assertTrue(conf.isReloaded());
try {
Properties properties = conf.getProperties();
fail("Should not have got here");
} catch (RuntimeException e) {
assertTrue(e.toString(),e.getMessage().contains("not found"));
}
}
private static class SubConf extends Configuration {
private boolean reloaded;
/**
* A new configuration where the behavior of reading from the default resources
* can be turned off.
*
* If the parameter {@code loadDefaults} is false, the new instance will not
* load resources from the default files.
*
* @param loadDefaults specifies whether to load from the default files
*/
private SubConf(boolean loadDefaults) {
super(loadDefaults);
}
public Properties getProperties() {
return super.getProps();
}
/**
* {@inheritDoc}.
* Sets the reloaded flag.
*/
@Override
public void reloadConfiguration() {
super.reloadConfiguration();
reloaded = true;
}
public boolean isReloaded() {
return reloaded;
}
public void setReloaded(boolean reloaded) {
this.reloaded = reloaded;
}
}
}
| 3,008 | 28.213592 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketTimeoutException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import javax.net.SocketFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.retry.DefaultFailoverProxyProvider;
import org.apache.hadoop.io.retry.FailoverProxyProvider;
import org.apache.hadoop.io.retry.Idempotent;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.ipc.Client.ConnectionId;
import org.apache.hadoop.ipc.RPC.RpcKind;
import org.apache.hadoop.ipc.Server.Connection;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.common.primitives.Bytes;
import com.google.common.primitives.Ints;
/** Unit tests for IPC. */
public class TestIPC {
public static final Log LOG =
LogFactory.getLog(TestIPC.class);
private static Configuration conf;
final static private int PING_INTERVAL = 1000;
final static private int MIN_SLEEP_TIME = 1000;
/**
* Flag used to turn off the fault injection behavior
* of the various writables.
**/
static boolean WRITABLE_FAULTS_ENABLED = true;
static int WRITABLE_FAULTS_SLEEP = 0;
@Before
public void setupConf() {
conf = new Configuration();
Client.setPingInterval(conf, PING_INTERVAL);
}
private static final Random RANDOM = new Random();
private static final String ADDRESS = "0.0.0.0";
/** Directory where we can count open file descriptors on Linux */
private static final File FD_DIR = new File("/proc/self/fd");
private static class TestServer extends Server {
// Tests can set callListener to run a piece of code each time the server
// receives a call. This code executes on the server thread, so it has
// visibility of that thread's thread-local storage.
private Runnable callListener;
private boolean sleep;
private Class<? extends Writable> responseClass;
public TestServer(int handlerCount, boolean sleep) throws IOException {
this(handlerCount, sleep, LongWritable.class, null);
}
public TestServer(int handlerCount, boolean sleep,
Class<? extends Writable> paramClass,
Class<? extends Writable> responseClass)
throws IOException {
super(ADDRESS, 0, paramClass, handlerCount, conf);
this.sleep = sleep;
this.responseClass = responseClass;
}
@Override
public Writable call(RPC.RpcKind rpcKind, String protocol, Writable param,
long receiveTime) throws IOException {
if (sleep) {
// sleep a bit
try {
Thread.sleep(RANDOM.nextInt(PING_INTERVAL) + MIN_SLEEP_TIME);
} catch (InterruptedException e) {}
}
if (callListener != null) {
callListener.run();
}
if (responseClass != null) {
try {
return responseClass.newInstance();
} catch (Exception e) {
throw new RuntimeException(e);
}
} else {
return param; // echo param as result
}
}
}
private static class SerialCaller extends Thread {
private Client client;
private InetSocketAddress server;
private int count;
private boolean failed;
public SerialCaller(Client client, InetSocketAddress server, int count) {
this.client = client;
this.server = server;
this.count = count;
}
@Override
public void run() {
for (int i = 0; i < count; i++) {
try {
LongWritable param = new LongWritable(RANDOM.nextLong());
LongWritable value =
(LongWritable)client.call(param, server, null, null, 0, conf);
if (!param.equals(value)) {
LOG.fatal("Call failed!");
failed = true;
break;
}
} catch (Exception e) {
LOG.fatal("Caught: " + StringUtils.stringifyException(e));
failed = true;
}
}
}
}
/**
   * An RpcInvocationHandler instance for tests. Its invoke function reuses
   * the same {@link Client} instance, and fails the first {@code total}
   * calls (by throwing an IOException).
*/
private static class TestInvocationHandler implements RpcInvocationHandler {
private static int retry = 0;
private final Client client;
private final Server server;
private final int total;
TestInvocationHandler(Client client, Server server, int total) {
this.client = client;
this.server = server;
this.total = total;
}
protected Object returnValue(Object value) throws Exception {
if (retry++ < total) {
throw new IOException("Fake IOException");
}
return value;
}
@Override
public Object invoke(Object proxy, Method method, Object[] args)
throws Throwable {
LongWritable param = new LongWritable(RANDOM.nextLong());
LongWritable value = (LongWritable) client.call(param,
NetUtils.getConnectAddress(server), null, null, 0, conf);
return returnValue(value);
}
@Override
public void close() throws IOException {}
@Override
public ConnectionId getConnectionId() {
return null;
}
}
private static class TestInvalidTokenHandler extends TestInvocationHandler {
private int invocations = 0;
TestInvalidTokenHandler(Client client, Server server) {
super(client, server, 1);
}
@Override
protected Object returnValue(Object value) throws Exception {
throw new InvalidToken("Invalid Token");
}
@Override
public Object invoke(Object proxy, Method method, Object[] args)
throws Throwable {
invocations++;
return super.invoke(proxy, method, args);
}
}
@Test(timeout=60000)
public void testSerial() throws IOException, InterruptedException {
internalTestSerial(3, false, 2, 5, 100);
internalTestSerial(3, true, 2, 5, 10);
}
public void internalTestSerial(int handlerCount, boolean handlerSleep,
int clientCount, int callerCount, int callCount)
throws IOException, InterruptedException {
Server server = new TestServer(handlerCount, handlerSleep);
InetSocketAddress addr = NetUtils.getConnectAddress(server);
server.start();
Client[] clients = new Client[clientCount];
for (int i = 0; i < clientCount; i++) {
clients[i] = new Client(LongWritable.class, conf);
}
SerialCaller[] callers = new SerialCaller[callerCount];
for (int i = 0; i < callerCount; i++) {
callers[i] = new SerialCaller(clients[i%clientCount], addr, callCount);
callers[i].start();
}
for (int i = 0; i < callerCount; i++) {
callers[i].join();
assertFalse(callers[i].failed);
}
for (int i = 0; i < clientCount; i++) {
clients[i].stop();
}
server.stop();
}
@Test(timeout=60000)
public void testStandAloneClient() throws IOException {
Client client = new Client(LongWritable.class, conf);
InetSocketAddress address = new InetSocketAddress("127.0.0.1", 10);
try {
client.call(new LongWritable(RANDOM.nextLong()),
address, null, null, 0, conf);
fail("Expected an exception to have been thrown");
} catch (IOException e) {
String message = e.getMessage();
String addressText = address.getHostName() + ":" + address.getPort();
assertTrue("Did not find "+addressText+" in "+message,
message.contains(addressText));
Throwable cause=e.getCause();
assertNotNull("No nested exception in "+e,cause);
String causeText=cause.getMessage();
assertTrue("Did not find " + causeText + " in " + message,
message.contains(causeText));
}
}
static void maybeThrowIOE() throws IOException {
if (WRITABLE_FAULTS_ENABLED) {
maybeSleep();
throw new IOException("Injected fault");
}
}
static void maybeThrowRTE() {
if (WRITABLE_FAULTS_ENABLED) {
maybeSleep();
throw new RuntimeException("Injected fault");
}
}
private static void maybeSleep() {
if (WRITABLE_FAULTS_SLEEP > 0) {
try {
Thread.sleep(WRITABLE_FAULTS_SLEEP);
} catch (InterruptedException ie) {
}
}
}
@SuppressWarnings("unused")
private static class IOEOnReadWritable extends LongWritable {
public IOEOnReadWritable() {}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
maybeThrowIOE();
}
}
@SuppressWarnings("unused")
private static class RTEOnReadWritable extends LongWritable {
public RTEOnReadWritable() {}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
maybeThrowRTE();
}
}
@SuppressWarnings("unused")
private static class IOEOnWriteWritable extends LongWritable {
public IOEOnWriteWritable() {}
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
maybeThrowIOE();
}
}
@SuppressWarnings("unused")
private static class RTEOnWriteWritable extends LongWritable {
public RTEOnWriteWritable() {}
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
maybeThrowRTE();
}
}
/**
* Generic test case for exceptions thrown at some point in the IPC
* process.
*
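   * For example, testIOEOnServerReadParam passes IOEOnReadWritable as the
   * server-side parameter class, so the injected fault fires while the
   * server deserializes the request.
   *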
* @param clientParamClass - client writes this writable for parameter
* @param serverParamClass - server reads this writable for parameter
* @param serverResponseClass - server writes this writable for response
* @param clientResponseClass - client reads this writable for response
*/
private void doErrorTest(
Class<? extends LongWritable> clientParamClass,
Class<? extends LongWritable> serverParamClass,
Class<? extends LongWritable> serverResponseClass,
Class<? extends LongWritable> clientResponseClass)
throws IOException, InstantiationException, IllegalAccessException {
// start server
Server server = new TestServer(1, false,
serverParamClass, serverResponseClass);
InetSocketAddress addr = NetUtils.getConnectAddress(server);
server.start();
// start client
WRITABLE_FAULTS_ENABLED = true;
Client client = new Client(clientResponseClass, conf);
try {
LongWritable param = clientParamClass.newInstance();
try {
client.call(param, addr, null, null, 0, conf);
fail("Expected an exception to have been thrown");
} catch (Throwable t) {
assertExceptionContains(t, "Injected fault");
}
// Doing a second call with faults disabled should return fine --
// ie the internal state of the client or server should not be broken
// by the failed call
WRITABLE_FAULTS_ENABLED = false;
client.call(param, addr, null, null, 0, conf);
} finally {
server.stop();
}
}
@Test(timeout=60000)
public void testIOEOnClientWriteParam() throws Exception {
doErrorTest(IOEOnWriteWritable.class,
LongWritable.class,
LongWritable.class,
LongWritable.class);
}
@Test(timeout=60000)
public void testRTEOnClientWriteParam() throws Exception {
doErrorTest(RTEOnWriteWritable.class,
LongWritable.class,
LongWritable.class,
LongWritable.class);
}
@Test(timeout=60000)
public void testIOEOnServerReadParam() throws Exception {
doErrorTest(LongWritable.class,
IOEOnReadWritable.class,
LongWritable.class,
LongWritable.class);
}
@Test(timeout=60000)
public void testRTEOnServerReadParam() throws Exception {
doErrorTest(LongWritable.class,
RTEOnReadWritable.class,
LongWritable.class,
LongWritable.class);
}
@Test(timeout=60000)
public void testIOEOnServerWriteResponse() throws Exception {
doErrorTest(LongWritable.class,
LongWritable.class,
IOEOnWriteWritable.class,
LongWritable.class);
}
@Test(timeout=60000)
public void testRTEOnServerWriteResponse() throws Exception {
doErrorTest(LongWritable.class,
LongWritable.class,
RTEOnWriteWritable.class,
LongWritable.class);
}
@Test(timeout=60000)
public void testIOEOnClientReadResponse() throws Exception {
doErrorTest(LongWritable.class,
LongWritable.class,
LongWritable.class,
IOEOnReadWritable.class);
}
@Test(timeout=60000)
public void testRTEOnClientReadResponse() throws Exception {
doErrorTest(LongWritable.class,
LongWritable.class,
LongWritable.class,
RTEOnReadWritable.class);
}
/**
* Test case that fails a write, but only after taking enough time
* that a ping should have been sent. This is a reproducer for a
* deadlock seen in one iteration of HADOOP-6762.
*/
@Test(timeout=60000)
public void testIOEOnWriteAfterPingClient() throws Exception {
// start server
Client.setPingInterval(conf, 100);
try {
WRITABLE_FAULTS_SLEEP = 1000;
doErrorTest(IOEOnWriteWritable.class,
LongWritable.class,
LongWritable.class,
LongWritable.class);
} finally {
WRITABLE_FAULTS_SLEEP = 0;
}
}
private static void assertExceptionContains(
Throwable t, String substring) {
String msg = StringUtils.stringifyException(t);
assertTrue("Exception should contain substring '" + substring + "':\n" +
msg, msg.contains(substring));
LOG.info("Got expected exception", t);
}
/**
* Test that, if the socket factory throws an IOE, it properly propagates
* to the client.
*/
@Test(timeout=60000)
public void testSocketFactoryException() throws IOException {
SocketFactory mockFactory = mock(SocketFactory.class);
doThrow(new IOException("Injected fault")).when(mockFactory).createSocket();
Client client = new Client(LongWritable.class, conf, mockFactory);
InetSocketAddress address = new InetSocketAddress("127.0.0.1", 10);
try {
client.call(new LongWritable(RANDOM.nextLong()),
address, null, null, 0, conf);
fail("Expected an exception to have been thrown");
} catch (IOException e) {
assertTrue(e.getMessage().contains("Injected fault"));
}
}
/**
* Test that, if a RuntimeException is thrown after creating a socket
* but before successfully connecting to the IPC server, that the
* failure is handled properly. This is a regression test for
* HADOOP-7428.
*/
@Test(timeout=60000)
public void testRTEDuringConnectionSetup() throws IOException {
// Set up a socket factory which returns sockets which
// throw an RTE when setSoTimeout is called.
SocketFactory spyFactory = spy(NetUtils.getDefaultSocketFactory(conf));
Mockito.doAnswer(new Answer<Socket>() {
@Override
public Socket answer(InvocationOnMock invocation) throws Throwable {
Socket s = spy((Socket)invocation.callRealMethod());
doThrow(new RuntimeException("Injected fault")).when(s)
.setSoTimeout(anyInt());
return s;
}
}).when(spyFactory).createSocket();
Server server = new TestServer(1, true);
server.start();
try {
// Call should fail due to injected exception.
InetSocketAddress address = NetUtils.getConnectAddress(server);
Client client = new Client(LongWritable.class, conf, spyFactory);
try {
client.call(new LongWritable(RANDOM.nextLong()),
address, null, null, 0, conf);
fail("Expected an exception to have been thrown");
} catch (Exception e) {
LOG.info("caught expected exception", e);
assertTrue(StringUtils.stringifyException(e).contains(
"Injected fault"));
}
// Resetting to the normal socket behavior should succeed
// (i.e. it should not have cached a half-constructed connection)
Mockito.reset(spyFactory);
client.call(new LongWritable(RANDOM.nextLong()),
address, null, null, 0, conf);
} finally {
server.stop();
}
}
@Test(timeout=60000)
public void testIpcTimeout() throws IOException {
// start server
Server server = new TestServer(1, true);
InetSocketAddress addr = NetUtils.getConnectAddress(server);
server.start();
// start client
Client client = new Client(LongWritable.class, conf);
// set timeout to be less than MIN_SLEEP_TIME
try {
client.call(new LongWritable(RANDOM.nextLong()),
addr, null, null, MIN_SLEEP_TIME/2, conf);
fail("Expected an exception to have been thrown");
} catch (SocketTimeoutException e) {
LOG.info("Get a SocketTimeoutException ", e);
}
// set timeout to be bigger than 3*ping interval
client.call(new LongWritable(RANDOM.nextLong()),
addr, null, null, 3*PING_INTERVAL+MIN_SLEEP_TIME, conf);
}
@Test(timeout=60000)
public void testIpcConnectTimeout() throws IOException {
// start server
Server server = new TestServer(1, true);
InetSocketAddress addr = NetUtils.getConnectAddress(server);
//Intentionally do not start server to get a connection timeout
// start client
Client.setConnectTimeout(conf, 100);
Client client = new Client(LongWritable.class, conf);
// set the rpc timeout to twice the MIN_SLEEP_TIME
try {
client.call(new LongWritable(RANDOM.nextLong()),
addr, null, null, MIN_SLEEP_TIME*2, conf);
fail("Expected an exception to have been thrown");
} catch (SocketTimeoutException e) {
LOG.info("Get a SocketTimeoutException ", e);
}
}
/**
* Check service class byte in IPC header is correct on wire.
*/
@Test(timeout=60000)
public void testIpcWithServiceClass() throws IOException {
// start server
Server server = new TestServer(5, false);
InetSocketAddress addr = NetUtils.getConnectAddress(server);
server.start();
// start client
Client.setConnectTimeout(conf, 10000);
callAndVerify(server, addr, 0, true);
    // The service class travels as a single byte on the wire, so -128 is the
    // lowest value that survives unchanged; -129 overflows the byte and must
    // arrive altered.
callAndVerify(server, addr, -128, true);
callAndVerify(server, addr, -129, false);
    // Likewise 127 is the highest value that fits in a byte; 128 overflows
    // and must arrive altered.
callAndVerify(server, addr, 127, true);
callAndVerify(server, addr, 128, false);
server.stop();
}
private static class TestServerQueue extends Server {
final CountDownLatch firstCallLatch = new CountDownLatch(1);
final CountDownLatch callBlockLatch = new CountDownLatch(1);
TestServerQueue(int expectedCalls, int readers, int callQ, int handlers,
Configuration conf) throws IOException {
super(ADDRESS, 0, LongWritable.class, handlers, readers, callQ, conf, null, null);
}
@Override
public Writable call(RPC.RpcKind rpcKind, String protocol, Writable param,
long receiveTime) throws IOException {
firstCallLatch.countDown();
try {
callBlockLatch.await();
} catch (InterruptedException e) {
throw new IOException(e);
}
return param;
}
}
/**
* Check that reader queueing works
* @throws BrokenBarrierException
* @throws InterruptedException
*/
@Test(timeout=60000)
public void testIpcWithReaderQueuing() throws Exception {
// 1 reader, 1 connectionQ slot, 1 callq
for (int i=0; i < 10; i++) {
checkBlocking(1, 1, 1);
}
// 4 readers, 5 connectionQ slots, 2 callq
for (int i=0; i < 10; i++) {
checkBlocking(4, 5, 2);
}
}
// goal is to jam a handler with a connection, fill the callq with
// connections, in turn jamming the readers - then flood the server and
// ensure that the listener blocks when the reader connection queues fill
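  // Worked example for the (4, 5, 2) case above with one handler:
  // initialClients = 4 + 2 + 1 = 7 and maxAccept = 7 + 4*5 + 1 = 28, so with
  // clients = 56 the server should plateau at exactly 28 open connections
  // until the call latch is released.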
private void checkBlocking(int readers, int readerQ, int callQ) throws Exception {
int handlers = 1; // makes it easier
final Configuration conf = new Configuration();
conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE_KEY, readerQ);
// send in enough clients to block up the handlers, callq, and readers
int initialClients = readers + callQ + handlers;
// max connections we should ever end up accepting at once
int maxAccept = initialClients + readers*readerQ + 1; // 1 = listener
// stress it with 2X the max
int clients = maxAccept*2;
final AtomicInteger failures = new AtomicInteger(0);
final CountDownLatch callFinishedLatch = new CountDownLatch(clients);
// start server
final TestServerQueue server =
new TestServerQueue(clients, readers, callQ, handlers, conf);
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
server.start();
Client.setConnectTimeout(conf, 10000);
// instantiate the threads, will start in batches
Thread[] threads = new Thread[clients];
for (int i=0; i<clients; i++) {
threads[i] = new Thread(new Runnable() {
@Override
public void run() {
Client client = new Client(LongWritable.class, conf);
try {
client.call(new LongWritable(Thread.currentThread().getId()),
addr, null, null, 60000, conf);
} catch (Throwable e) {
LOG.error(e);
failures.incrementAndGet();
return;
} finally {
callFinishedLatch.countDown();
client.stop();
}
}
});
}
// start enough clients to block up the handler, callq, and each reader;
// let the calls sequentially slot in to avoid some readers blocking
// and others not blocking in the race to fill the callq
for (int i=0; i < initialClients; i++) {
threads[i].start();
if (i==0) {
// let first reader block in a call
server.firstCallLatch.await();
} else if (i <= callQ) {
// let subsequent readers jam the callq, will happen immediately
while (server.getCallQueueLen() != i) {
Thread.sleep(1);
}
} // additional threads block the readers trying to add to the callq
}
// wait till everything is slotted, should happen immediately
Thread.sleep(10);
if (server.getNumOpenConnections() < initialClients) {
LOG.info("(initial clients) need:"+initialClients+" connections have:"+server.getNumOpenConnections());
Thread.sleep(100);
}
LOG.info("ipc layer should be blocked");
assertEquals(callQ, server.getCallQueueLen());
assertEquals(initialClients, server.getNumOpenConnections());
// now flood the server with the rest of the connections, the reader's
// connection queues should fill and then the listener should block
for (int i=initialClients; i<clients; i++) {
threads[i].start();
}
Thread.sleep(10);
if (server.getNumOpenConnections() < maxAccept) {
LOG.info("(max clients) need:"+maxAccept+" connections have:"+server.getNumOpenConnections());
Thread.sleep(100);
}
// check a few times to make sure we didn't go over
for (int i=0; i<4; i++) {
assertEquals(maxAccept, server.getNumOpenConnections());
Thread.sleep(100);
}
// sanity check that no calls have finished
assertEquals(clients, callFinishedLatch.getCount());
LOG.info("releasing the calls");
server.callBlockLatch.countDown();
callFinishedLatch.await();
for (Thread t : threads) {
t.join();
}
assertEquals(0, failures.get());
server.stop();
}
@Test(timeout=30000)
public void testConnectionIdleTimeouts() throws Exception {
((Log4JLogger)Server.LOG).getLogger().setLevel(Level.DEBUG);
final int maxIdle = 1000;
final int cleanupInterval = maxIdle*3/4; // stagger cleanups
final int killMax = 3;
final int clients = 1 + killMax*2; // 1 to block, 2 batches to kill
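    // with these settings clients = 1 + 3*2 = 7; each idle scan (every
    // cleanupInterval ms) may close at most killMax (3) idle connections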
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, maxIdle);
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_KEY, 0);
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_KEY, killMax);
conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_KEY, cleanupInterval);
final CyclicBarrier firstCallBarrier = new CyclicBarrier(2);
final CyclicBarrier callBarrier = new CyclicBarrier(clients);
final CountDownLatch allCallLatch = new CountDownLatch(clients);
final AtomicBoolean error = new AtomicBoolean();
final TestServer server = new TestServer(clients, false);
Thread[] threads = new Thread[clients];
try {
server.callListener = new Runnable(){
AtomicBoolean first = new AtomicBoolean(true);
@Override
public void run() {
try {
allCallLatch.countDown();
// block first call
if (first.compareAndSet(true, false)) {
firstCallBarrier.await();
} else {
callBarrier.await();
}
} catch (Throwable t) {
LOG.error(t);
error.set(true);
}
}
};
server.start();
// start client
final CountDownLatch callReturned = new CountDownLatch(clients-1);
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
final Configuration clientConf = new Configuration();
clientConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 10000);
for (int i=0; i < clients; i++) {
threads[i] = new Thread(new Runnable(){
@Override
public void run() {
Client client = new Client(LongWritable.class, clientConf);
try {
client.call(new LongWritable(Thread.currentThread().getId()),
addr, null, null, 0, clientConf);
callReturned.countDown();
Thread.sleep(10000);
} catch (IOException e) {
LOG.error(e);
} catch (InterruptedException e) {
}
}
});
threads[i].start();
}
// all calls blocked in handler so all connections made
allCallLatch.await();
assertFalse(error.get());
assertEquals(clients, server.getNumOpenConnections());
// wake up blocked calls and wait for client call to return, no
// connections should have closed
callBarrier.await();
callReturned.await();
assertEquals(clients, server.getNumOpenConnections());
// server won't close till maxIdle*2, so give scanning thread time to
// be almost ready to close idle connection. after which it should
// close max connections on every cleanupInterval
Thread.sleep(maxIdle*2-cleanupInterval);
for (int i=clients; i > 1; i -= killMax) {
Thread.sleep(cleanupInterval);
assertFalse(error.get());
assertEquals(i, server.getNumOpenConnections());
}
// connection for the first blocked call should still be open
Thread.sleep(cleanupInterval);
assertFalse(error.get());
assertEquals(1, server.getNumOpenConnections());
// wake up call and ensure connection times out
firstCallBarrier.await();
Thread.sleep(maxIdle*2);
assertFalse(error.get());
assertEquals(0, server.getNumOpenConnections());
    } finally {
      for (Thread t : threads) {
        if (t != null) {
          t.interrupt();
          t.join();
        }
      }
      server.stop();
    }
}
  /**
   * Make a call from a client and verify whether the service class set in the
   * connection header took effect on the server side.
   */
  private void callAndVerify(Server server, InetSocketAddress addr,
      int serviceClass, boolean noChanged) throws IOException {
Client client = new Client(LongWritable.class, conf);
client.call(new LongWritable(RANDOM.nextLong()),
addr, null, null, MIN_SLEEP_TIME, serviceClass, conf);
Connection connection = server.getConnections()[0];
int serviceClass2 = connection.getServiceClass();
    assertFalse(noChanged ^ (serviceClass == serviceClass2));
client.stop();
}
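  /**
   * Minimal usage sketch of the helper above (editor's illustration, not part
   * of the original suite): a client that sends service class 7 in its
   * connection header should be observed with service class 7 on the server.
   */
  private void demoCallAndVerify() throws IOException {
    Server server = new TestServer(5, false);
    server.start();
    try {
      callAndVerify(server, NetUtils.getConnectAddress(server), 7, true);
    } finally {
      server.stop();
    }
  }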
@Test(timeout=30000, expected=IOException.class)
public void testIpcAfterStopping() throws IOException {
// start server
Server server = new TestServer(5, false);
InetSocketAddress addr = NetUtils.getConnectAddress(server);
server.start();
// start client
Client client = new Client(LongWritable.class, conf);
client.call(new LongWritable(RANDOM.nextLong()),
addr, null, null, MIN_SLEEP_TIME, 0, conf);
client.stop();
// This call should throw IOException.
client.call(new LongWritable(RANDOM.nextLong()),
addr, null, null, MIN_SLEEP_TIME, 0, conf);
}
/**
* Check that file descriptors aren't leaked by starting
* and stopping IPC servers.
*/
@Test(timeout=60000)
public void testSocketLeak() throws IOException {
Assume.assumeTrue(FD_DIR.exists()); // only run on Linux
long startFds = countOpenFileDescriptors();
for (int i = 0; i < 50; i++) {
Server server = new TestServer(1, true);
server.start();
server.stop();
}
long endFds = countOpenFileDescriptors();
assertTrue("Leaked " + (endFds - startFds) + " file descriptors",
endFds - startFds < 20);
}
private long countOpenFileDescriptors() {
return FD_DIR.list().length;
}
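  // Editor's note: FD_DIR is expected to point at /proc/self/fd (hence the
  // Linux-only Assume guard in testSocketLeak): the kernel exposes one entry
  // per open descriptor there, so listing the directory is a dependency-free
  // way to count this process's open files and sockets.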
@Test(timeout=60000)
  public void testIpcFromHadoop_0_18_3() throws IOException {
doIpcVersionTest(NetworkTraces.HADOOP_0_18_3_RPC_DUMP,
NetworkTraces.RESPONSE_TO_HADOOP_0_18_3_RPC);
}
@Test(timeout=60000)
public void testIpcFromHadoop0_20_3() throws IOException {
doIpcVersionTest(NetworkTraces.HADOOP_0_20_3_RPC_DUMP,
NetworkTraces.RESPONSE_TO_HADOOP_0_20_3_RPC);
}
@Test(timeout=60000)
public void testIpcFromHadoop0_21_0() throws IOException {
doIpcVersionTest(NetworkTraces.HADOOP_0_21_0_RPC_DUMP,
NetworkTraces.RESPONSE_TO_HADOOP_0_21_0_RPC);
}
@Test(timeout=60000)
public void testHttpGetResponse() throws IOException {
doIpcVersionTest("GET / HTTP/1.0\r\n\r\n".getBytes(),
Server.RECEIVED_HTTP_REQ_RESPONSE.getBytes());
}
@Test(timeout=60000)
public void testConnectionRetriesOnSocketTimeoutExceptions() throws IOException {
Configuration conf = new Configuration();
// set max retries to 0
conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
0);
assertRetriesOnSocketTimeouts(conf, 1);
// set max retries to 3
conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
3);
assertRetriesOnSocketTimeouts(conf, 4);
}
private static class CallInfo {
int id = RpcConstants.INVALID_CALL_ID;
int retry = RpcConstants.INVALID_RETRY_COUNT;
}
/**
* Test if
* (1) the rpc server uses the call id/retry provided by the rpc client, and
* (2) the rpc client receives the same call id/retry from the rpc server.
*/
@Test(timeout=60000)
public void testCallIdAndRetry() throws IOException {
final CallInfo info = new CallInfo();
// Override client to store the call info and check response
final Client client = new Client(LongWritable.class, conf) {
@Override
Call createCall(RpcKind rpcKind, Writable rpcRequest) {
final Call call = super.createCall(rpcKind, rpcRequest);
info.id = call.id;
info.retry = call.retry;
return call;
}
@Override
void checkResponse(RpcResponseHeaderProto header) throws IOException {
super.checkResponse(header);
Assert.assertEquals(info.id, header.getCallId());
Assert.assertEquals(info.retry, header.getRetryCount());
}
};
// Attach a listener that tracks every call received by the server.
final TestServer server = new TestServer(1, false);
server.callListener = new Runnable() {
@Override
public void run() {
Assert.assertEquals(info.id, Server.getCallId());
Assert.assertEquals(info.retry, Server.getCallRetryCount());
}
};
try {
InetSocketAddress addr = NetUtils.getConnectAddress(server);
server.start();
final SerialCaller caller = new SerialCaller(client, addr, 10);
caller.run();
assertFalse(caller.failed);
} finally {
client.stop();
server.stop();
}
}
/** A dummy protocol */
private interface DummyProtocol {
@Idempotent
public void dummyRun() throws IOException;
}
/**
* Test the retry count while used in a retry proxy.
*/
@Test(timeout=60000)
public void testRetryProxy() throws IOException {
final Client client = new Client(LongWritable.class, conf);
final TestServer server = new TestServer(1, false);
server.callListener = new Runnable() {
private int retryCount = 0;
@Override
public void run() {
Assert.assertEquals(retryCount++, Server.getCallRetryCount());
}
};
    // run many iterations so any race condition is more likely to surface;
    // 10000 iterations take about 6s on a Core i7 machine
final int totalRetry = 10000;
DummyProtocol proxy = (DummyProtocol) Proxy.newProxyInstance(
DummyProtocol.class.getClassLoader(),
new Class[] { DummyProtocol.class }, new TestInvocationHandler(client,
server, totalRetry));
DummyProtocol retryProxy = (DummyProtocol) RetryProxy.create(
DummyProtocol.class, proxy, RetryPolicies.RETRY_FOREVER);
try {
server.start();
retryProxy.dummyRun();
      Assert.assertEquals(totalRetry + 1, TestInvocationHandler.retry);
} finally {
Client.setCallIdAndRetryCount(0, 0);
client.stop();
server.stop();
}
}
  /**
   * Test that there is no retry when an InvalidToken exception is thrown.
   * Verifies the fix for HADOOP-12054.
   */
@Test(expected = InvalidToken.class)
public void testNoRetryOnInvalidToken() throws IOException {
final Client client = new Client(LongWritable.class, conf);
final TestServer server = new TestServer(1, false);
TestInvalidTokenHandler handler =
new TestInvalidTokenHandler(client, server);
DummyProtocol proxy = (DummyProtocol) Proxy.newProxyInstance(
DummyProtocol.class.getClassLoader(),
new Class[] { DummyProtocol.class }, handler);
FailoverProxyProvider<DummyProtocol> provider =
new DefaultFailoverProxyProvider<DummyProtocol>(
DummyProtocol.class, proxy);
DummyProtocol retryProxy =
(DummyProtocol) RetryProxy.create(DummyProtocol.class, provider,
RetryPolicies.failoverOnNetworkException(
RetryPolicies.TRY_ONCE_THEN_FAIL, 100, 100, 10000, 0));
try {
server.start();
retryProxy.dummyRun();
} finally {
      // Check that dummyRun was called only once
      Assert.assertEquals(1, handler.invocations);
Client.setCallIdAndRetryCount(0, 0);
client.stop();
server.stop();
}
}
/**
* Test if the rpc server gets the default retry count (0) from client.
*/
@Test(timeout=60000)
public void testInitialCallRetryCount() throws IOException {
    // Use a plain client; no call id or retry count is explicitly set
    final Client client = new Client(LongWritable.class, conf);
    // Attach a listener that checks the retry count received by the server.
final TestServer server = new TestServer(1, false);
server.callListener = new Runnable() {
@Override
public void run() {
// we have not set the retry count for the client, thus on the server
// side we should see retry count as 0
Assert.assertEquals(0, Server.getCallRetryCount());
}
};
try {
InetSocketAddress addr = NetUtils.getConnectAddress(server);
server.start();
final SerialCaller caller = new SerialCaller(client, addr, 10);
caller.run();
assertFalse(caller.failed);
} finally {
client.stop();
server.stop();
}
}
/**
* Test if the rpc server gets the retry count from client.
*/
@Test(timeout=60000)
public void testCallRetryCount() throws IOException {
final int retryCount = 255;
    // Set a call id and a non-default retry count on the client
    final Client client = new Client(LongWritable.class, conf);
    Client.setCallIdAndRetryCount(Client.nextCallId(), retryCount);
    // Attach a listener that checks the retry count received by the server.
final TestServer server = new TestServer(1, false);
server.callListener = new Runnable() {
@Override
public void run() {
          // the client's retry count was set to 255, so the server side
          // should observe the same value
Assert.assertEquals(retryCount, Server.getCallRetryCount());
}
};
try {
InetSocketAddress addr = NetUtils.getConnectAddress(server);
server.start();
final SerialCaller caller = new SerialCaller(client, addr, 10);
caller.run();
assertFalse(caller.failed);
} finally {
client.stop();
server.stop();
}
}
/**
* Tests that client generates a unique sequential call ID for each RPC call,
* even if multiple threads are using the same client.
* @throws InterruptedException
*/
@Test(timeout=60000)
public void testUniqueSequentialCallIds()
throws IOException, InterruptedException {
int serverThreads = 10, callerCount = 100, perCallerCallCount = 100;
TestServer server = new TestServer(serverThreads, false);
// Attach a listener that tracks every call ID received by the server. This
// list must be synchronized, because multiple server threads will add to it.
final List<Integer> callIds = Collections.synchronizedList(
new ArrayList<Integer>());
server.callListener = new Runnable() {
@Override
public void run() {
callIds.add(Server.getCallId());
}
};
Client client = new Client(LongWritable.class, conf);
try {
InetSocketAddress addr = NetUtils.getConnectAddress(server);
server.start();
SerialCaller[] callers = new SerialCaller[callerCount];
for (int i = 0; i < callerCount; ++i) {
callers[i] = new SerialCaller(client, addr, perCallerCallCount);
callers[i].start();
}
for (int i = 0; i < callerCount; ++i) {
callers[i].join();
assertFalse(callers[i].failed);
}
} finally {
client.stop();
server.stop();
}
int expectedCallCount = callerCount * perCallerCallCount;
assertEquals(expectedCallCount, callIds.size());
// It is not guaranteed that the server executes requests in sequential order
// of client call ID, so we must sort the call IDs before checking that it
// contains every expected value.
Collections.sort(callIds);
final int startID = callIds.get(0).intValue();
for (int i = 0; i < expectedCallCount; ++i) {
assertEquals(startID + i, callIds.get(i).intValue());
}
}
@Test
public void testMaxConnections() throws Exception {
conf.setInt("ipc.server.max.connections", 5);
Server server = null;
Thread connectors[] = new Thread[10];
try {
server = new TestServer(3, false);
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
server.start();
assertEquals(0, server.getNumOpenConnections());
for (int i = 0; i < 10; i++) {
connectors[i] = new Thread() {
@Override
public void run() {
Socket sock = null;
try {
sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
NetUtils.connect(sock, addr, 3000);
try {
Thread.sleep(4000);
} catch (InterruptedException ie) { }
} catch (IOException ioe) {
} finally {
if (sock != null) {
try {
sock.close();
} catch (IOException ioe) { }
}
}
}
};
connectors[i].start();
}
Thread.sleep(1000);
// server should only accept up to 5 connections
assertEquals(5, server.getNumOpenConnections());
for (int i = 0; i < 10; i++) {
connectors[i].join();
}
} finally {
if (server != null) {
server.stop();
}
conf.setInt("ipc.server.max.connections", 0);
}
}
@Test
public void testClientGetTimeout() throws IOException {
Configuration config = new Configuration();
    assertEquals(-1, Client.getTimeout(config));
}
private void assertRetriesOnSocketTimeouts(Configuration conf,
int maxTimeoutRetries) throws IOException {
SocketFactory mockFactory = Mockito.mock(SocketFactory.class);
doThrow(new ConnectTimeoutException("fake")).when(mockFactory).createSocket();
Client client = new Client(IntWritable.class, conf, mockFactory);
InetSocketAddress address = new InetSocketAddress("127.0.0.1", 9090);
try {
client.call(new IntWritable(RANDOM.nextInt()), address, null, null, 0,
conf);
fail("Not throwing the SocketTimeoutException");
} catch (SocketTimeoutException e) {
Mockito.verify(mockFactory, Mockito.times(maxTimeoutRetries))
.createSocket();
}
}
private void doIpcVersionTest(
byte[] requestData,
byte[] expectedResponse) throws IOException {
Server server = new TestServer(1, true);
InetSocketAddress addr = NetUtils.getConnectAddress(server);
server.start();
Socket socket = new Socket();
try {
NetUtils.connect(socket, addr, 5000);
OutputStream out = socket.getOutputStream();
InputStream in = socket.getInputStream();
out.write(requestData, 0, requestData.length);
out.flush();
ByteArrayOutputStream baos = new ByteArrayOutputStream();
IOUtils.copyBytes(in, baos, 256);
byte[] responseData = baos.toByteArray();
assertEquals(
StringUtils.byteToHexString(expectedResponse),
StringUtils.byteToHexString(responseData));
} finally {
IOUtils.closeSocket(socket);
server.stop();
}
}
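  // Editor's sketch (names are hypothetical): wiring in another legacy client
  // only needs the raw first TCP payload captured from that client plus the
  // canned reply the server should send back, e.g.
  //   doIpcVersionTest(NetworkTraces.SOME_OLD_CLIENT_RPC_DUMP,
  //       NetworkTraces.RESPONSE_TO_SOME_OLD_CLIENT_RPC);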
  /**
   * Convert a string of lines that look like:
   *   "68 72 70 63 02 00 00 00 82 00 1d 6f 72 67 2e 61 hrpc.... ...org.a"
   * into an array of bytes.
   */
private static byte[] hexDumpToBytes(String hexdump) {
final int LAST_HEX_COL = 3 * 16;
StringBuilder hexString = new StringBuilder();
for (String line : StringUtils.toUpperCase(hexdump).split("\n")) {
hexString.append(line.substring(0, LAST_HEX_COL).replace(" ", ""));
}
return StringUtils.hexStringToByte(hexString.toString());
}
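  /**
   * Round-trip sketch for the parser above (editor's illustration, not an
   * original test): one 16-byte dump line yields exactly 16 bytes, starting
   * with the "hrpc" magic.
   */
  private static void demoHexDumpToBytes() {
    byte[] bytes = hexDumpToBytes(
        "68 72 70 63 09 00 00 00 00 00 00 00 00 00 00 00 hrpc.... ........\n");
    assert bytes.length == 16 && bytes[0] == 'h' && bytes[3] == 'c';
  }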
/**
* Wireshark traces collected from various client versions. These enable
* us to test that old versions of the IPC stack will receive the correct
* responses so that they will throw a meaningful error message back
* to the user.
*/
private static abstract class NetworkTraces {
/**
* Wireshark dump of an RPC request from Hadoop 0.18.3
*/
final static byte[] HADOOP_0_18_3_RPC_DUMP =
hexDumpToBytes(
"68 72 70 63 02 00 00 00 82 00 1d 6f 72 67 2e 61 hrpc.... ...org.a\n" +
"70 61 63 68 65 2e 68 61 64 6f 6f 70 2e 69 6f 2e pache.ha doop.io.\n" +
"57 72 69 74 61 62 6c 65 00 30 6f 72 67 2e 61 70 Writable .0org.ap\n" +
"61 63 68 65 2e 68 61 64 6f 6f 70 2e 69 6f 2e 4f ache.had oop.io.O\n" +
"62 6a 65 63 74 57 72 69 74 61 62 6c 65 24 4e 75 bjectWri table$Nu\n" +
"6c 6c 49 6e 73 74 61 6e 63 65 00 2f 6f 72 67 2e llInstan ce./org.\n" +
"61 70 61 63 68 65 2e 68 61 64 6f 6f 70 2e 73 65 apache.h adoop.se\n" +
"63 75 72 69 74 79 2e 55 73 65 72 47 72 6f 75 70 curity.U serGroup\n" +
"49 6e 66 6f 72 6d 61 74 69 6f 6e 00 00 00 6c 00 Informat ion...l.\n" +
"00 00 00 00 12 67 65 74 50 72 6f 74 6f 63 6f 6c .....get Protocol\n" +
"56 65 72 73 69 6f 6e 00 00 00 02 00 10 6a 61 76 Version. .....jav\n" +
"61 2e 6c 61 6e 67 2e 53 74 72 69 6e 67 00 2e 6f a.lang.S tring..o\n" +
"72 67 2e 61 70 61 63 68 65 2e 68 61 64 6f 6f 70 rg.apach e.hadoop\n" +
"2e 6d 61 70 72 65 64 2e 4a 6f 62 53 75 62 6d 69 .mapred. JobSubmi\n" +
"73 73 69 6f 6e 50 72 6f 74 6f 63 6f 6c 00 04 6c ssionPro tocol..l\n" +
"6f 6e 67 00 00 00 00 00 00 00 0a ong..... ... \n");
final static String HADOOP0_18_ERROR_MSG =
"Server IPC version " + RpcConstants.CURRENT_VERSION +
" cannot communicate with client version 2";
/**
* Wireshark dump of the correct response that triggers an error message
* on an 0.18.3 client.
*/
final static byte[] RESPONSE_TO_HADOOP_0_18_3_RPC =
Bytes.concat(hexDumpToBytes(
"00 00 00 00 01 00 00 00 29 6f 72 67 2e 61 70 61 ........ )org.apa\n" +
"63 68 65 2e 68 61 64 6f 6f 70 2e 69 70 63 2e 52 che.hado op.ipc.R\n" +
"50 43 24 56 65 72 73 69 6f 6e 4d 69 73 6d 61 74 PC$Versi onMismat\n" +
"63 68 ch \n"),
Ints.toByteArray(HADOOP0_18_ERROR_MSG.length()),
HADOOP0_18_ERROR_MSG.getBytes());
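    // Editor's note (inferred from the dumps): each canned error response is
    // the fixed wire prefix -- call id, an error marker, and the
    // RPC$VersionMismatch class name -- followed by the length-prefixed
    // error message, hence the Bytes.concat(prefix, length, message) pattern.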
/**
* Wireshark dump of an RPC request from Hadoop 0.20.3
*/
final static byte[] HADOOP_0_20_3_RPC_DUMP =
hexDumpToBytes(
"68 72 70 63 03 00 00 00 79 27 6f 72 67 2e 61 70 hrpc.... y'org.ap\n" +
"61 63 68 65 2e 68 61 64 6f 6f 70 2e 69 70 63 2e ache.had oop.ipc.\n" +
"56 65 72 73 69 6f 6e 65 64 50 72 6f 74 6f 63 6f Versione dProtoco\n" +
"6c 01 0a 53 54 52 49 4e 47 5f 55 47 49 04 74 6f l..STRIN G_UGI.to\n" +
"64 64 09 04 74 6f 64 64 03 61 64 6d 07 64 69 61 dd..todd .adm.dia\n" +
"6c 6f 75 74 05 63 64 72 6f 6d 07 70 6c 75 67 64 lout.cdr om.plugd\n" +
"65 76 07 6c 70 61 64 6d 69 6e 05 61 64 6d 69 6e ev.lpadm in.admin\n" +
"0a 73 61 6d 62 61 73 68 61 72 65 06 6d 72 74 65 .sambash are.mrte\n" +
"73 74 00 00 00 6c 00 00 00 00 00 12 67 65 74 50 st...l.. ....getP\n" +
"72 6f 74 6f 63 6f 6c 56 65 72 73 69 6f 6e 00 00 rotocolV ersion..\n" +
"00 02 00 10 6a 61 76 61 2e 6c 61 6e 67 2e 53 74 ....java .lang.St\n" +
"72 69 6e 67 00 2e 6f 72 67 2e 61 70 61 63 68 65 ring..or g.apache\n" +
"2e 68 61 64 6f 6f 70 2e 6d 61 70 72 65 64 2e 4a .hadoop. mapred.J\n" +
"6f 62 53 75 62 6d 69 73 73 69 6f 6e 50 72 6f 74 obSubmis sionProt\n" +
"6f 63 6f 6c 00 04 6c 6f 6e 67 00 00 00 00 00 00 ocol..lo ng......\n" +
"00 14 .. \n");
final static String HADOOP0_20_ERROR_MSG =
"Server IPC version " + RpcConstants.CURRENT_VERSION +
" cannot communicate with client version 3";
final static byte[] RESPONSE_TO_HADOOP_0_20_3_RPC =
Bytes.concat(hexDumpToBytes(
"ff ff ff ff ff ff ff ff 00 00 00 29 6f 72 67 2e ........ ...)org.\n" +
"61 70 61 63 68 65 2e 68 61 64 6f 6f 70 2e 69 70 apache.h adoop.ip\n" +
"63 2e 52 50 43 24 56 65 72 73 69 6f 6e 4d 69 73 c.RPC$Ve rsionMis\n" +
"6d 61 74 63 68 match \n"),
Ints.toByteArray(HADOOP0_20_ERROR_MSG.length()),
HADOOP0_20_ERROR_MSG.getBytes());
final static String HADOOP0_21_ERROR_MSG =
"Server IPC version " + RpcConstants.CURRENT_VERSION +
" cannot communicate with client version 4";
final static byte[] HADOOP_0_21_0_RPC_DUMP =
hexDumpToBytes(
"68 72 70 63 04 50 hrpc.P" +
// in 0.21 it comes in two separate TCP packets
"00 00 00 3c 33 6f 72 67 2e 61 70 61 63 68 65 2e ...<3org .apache.\n" +
"68 61 64 6f 6f 70 2e 6d 61 70 72 65 64 75 63 65 hadoop.m apreduce\n" +
"2e 70 72 6f 74 6f 63 6f 6c 2e 43 6c 69 65 6e 74 .protoco l.Client\n" +
"50 72 6f 74 6f 63 6f 6c 01 00 04 74 6f 64 64 00 Protocol ...todd.\n" +
"00 00 00 71 00 00 00 00 00 12 67 65 74 50 72 6f ...q.... ..getPro\n" +
"74 6f 63 6f 6c 56 65 72 73 69 6f 6e 00 00 00 02 tocolVer sion....\n" +
"00 10 6a 61 76 61 2e 6c 61 6e 67 2e 53 74 72 69 ..java.l ang.Stri\n" +
"6e 67 00 33 6f 72 67 2e 61 70 61 63 68 65 2e 68 ng.3org. apache.h\n" +
"61 64 6f 6f 70 2e 6d 61 70 72 65 64 75 63 65 2e adoop.ma preduce.\n" +
"70 72 6f 74 6f 63 6f 6c 2e 43 6c 69 65 6e 74 50 protocol .ClientP\n" +
"72 6f 74 6f 63 6f 6c 00 04 6c 6f 6e 67 00 00 00 rotocol. .long...\n" +
"00 00 00 00 21 ....! \n");
final static byte[] RESPONSE_TO_HADOOP_0_21_0_RPC =
Bytes.concat(hexDumpToBytes(
"ff ff ff ff ff ff ff ff 00 00 00 29 6f 72 67 2e ........ ...)org.\n" +
"61 70 61 63 68 65 2e 68 61 64 6f 6f 70 2e 69 70 apache.h adoop.ip\n" +
"63 2e 52 50 43 24 56 65 72 73 69 6f 6e 4d 69 73 c.RPC$Ve rsionMis\n" +
"6d 61 74 63 68 match \n"),
Ints.toByteArray(HADOOP0_21_ERROR_MSG.length()),
HADOOP0_21_ERROR_MSG.getBytes());
}
}
| 52,138 | 34.252874 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.NetworkInterface;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import org.junit.Assert;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenSecretManager;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
/**
* MiniRPCBenchmark measures time to establish an RPC connection
* to a secure RPC server.
* It sequentially establishes connections the specified number of times,
* and calculates the average time taken to connect.
* The time to connect includes the server side authentication time.
* The benchmark supports three authentication methods:
* <ol>
* <li>simple - no authentication. In order to enter this mode
* the configuration file <tt>core-site.xml</tt> should specify
* <tt>hadoop.security.authentication = simple</tt>.
* This is the default mode.</li>
 * <li>kerberos - kerberos authentication. In order to enter this mode
 * the configuration file <tt>core-site.xml</tt> should specify
 * <tt>hadoop.security.authentication = kerberos</tt> and
 * the argument string should provide qualifying
 * <tt>keytabFile</tt> and <tt>userName</tt> parameters.</li>
 * <li>delegation token - authentication using delegation token.
 * In order to enter this mode the benchmark should provide all the
 * mentioned parameters for kerberos authentication plus the
 * <tt>useToken</tt> argument option.</li>
 * </ol>
* Input arguments:
* <ul>
* <li>numIterations - number of connections to establish</li>
* <li>keytabFile - keytab file for kerberos authentication</li>
* <li>userName - principal name for kerberos authentication</li>
* <li>useToken - should be specified for delegation token authentication</li>
* <li>logLevel - logging level, see {@link Level}</li>
* </ul>
*/
public class MiniRPCBenchmark {
private static final String KEYTAB_FILE_KEY = "test.keytab.file";
private static final String USER_NAME_KEY = "test.user.name";
private static final String MINI_USER = "miniUser";
private static final String RENEWER = "renewer";
private static final String GROUP_NAME_1 = "MiniGroup1";
private static final String GROUP_NAME_2 = "MiniGroup2";
private static final String[] GROUP_NAMES =
new String[] {GROUP_NAME_1, GROUP_NAME_2};
private UserGroupInformation currentUgi;
private Level logLevel;
MiniRPCBenchmark(Level l) {
currentUgi = null;
logLevel = l;
}
public static class TestDelegationTokenSelector extends
AbstractDelegationTokenSelector<TestDelegationTokenIdentifier>{
protected TestDelegationTokenSelector() {
super(new Text("MY KIND"));
}
}
@KerberosInfo(
serverPrincipal=USER_NAME_KEY)
@TokenInfo(TestDelegationTokenSelector.class)
public static interface MiniProtocol extends VersionedProtocol {
public static final long versionID = 1L;
/**
* Get a Delegation Token.
*/
public Token<TestDelegationTokenIdentifier> getDelegationToken(Text renewer)
throws IOException;
}
/**
* Primitive RPC server, which
* allows clients to connect to it.
*/
static class MiniServer implements MiniProtocol {
private static final String DEFAULT_SERVER_ADDRESS = "0.0.0.0";
private TestDelegationTokenSecretManager secretManager;
private Server rpcServer;
@Override // VersionedProtocol
public long getProtocolVersion(String protocol,
long clientVersion) throws IOException {
if (protocol.equals(MiniProtocol.class.getName()))
return versionID;
throw new IOException("Unknown protocol: " + protocol);
}
@Override // VersionedProtocol
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion,
int clientMethodsHashCode) throws IOException {
if (protocol.equals(MiniProtocol.class.getName()))
return new ProtocolSignature(versionID, null);
throw new IOException("Unknown protocol: " + protocol);
}
@Override // MiniProtocol
public Token<TestDelegationTokenIdentifier> getDelegationToken(Text renewer)
throws IOException {
String owner = UserGroupInformation.getCurrentUser().getUserName();
String realUser =
UserGroupInformation.getCurrentUser().getRealUser() == null ? "":
UserGroupInformation.getCurrentUser().getRealUser().getUserName();
TestDelegationTokenIdentifier tokenId =
new TestDelegationTokenIdentifier(
new Text(owner), renewer, new Text(realUser));
return new Token<TestDelegationTokenIdentifier>(tokenId, secretManager);
}
/** Start RPC server */
MiniServer(Configuration conf, String user, String keytabFile)
throws IOException {
UserGroupInformation.setConfiguration(conf);
UserGroupInformation.loginUserFromKeytab(user, keytabFile);
      secretManager =
          new TestDelegationTokenSecretManager(24*60*60*1000,
              7*24*60*60*1000, 24*60*60*1000, 3600000);
secretManager.startThreads();
rpcServer = new RPC.Builder(conf).setProtocol(MiniProtocol.class)
.setInstance(this).setBindAddress(DEFAULT_SERVER_ADDRESS).setPort(0)
.setNumHandlers(1).setVerbose(false).setSecretManager(secretManager)
.build();
rpcServer.start();
}
/** Stop RPC server */
void stop() {
if(rpcServer != null) rpcServer.stop();
rpcServer = null;
}
/** Get RPC server address */
InetSocketAddress getAddress() {
if(rpcServer == null) return null;
return NetUtils.getConnectAddress(rpcServer);
}
}
long connectToServer(Configuration conf, InetSocketAddress addr)
throws IOException {
MiniProtocol client = null;
try {
long start = Time.now();
client = RPC.getProxy(MiniProtocol.class,
MiniProtocol.versionID, addr, conf);
long end = Time.now();
return end - start;
} finally {
RPC.stopProxy(client);
}
}
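  /**
   * Editor's note: the token is obtained while impersonating MINI_USER
   * through a proxy UGI over the initial authenticated connection, then
   * stashed on a fresh test UGI so that the timed connections in
   * connectToServerUsingDelegationToken() authenticate with the token alone.
   */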
void connectToServerAndGetDelegationToken(
final Configuration conf, final InetSocketAddress addr) throws IOException {
MiniProtocol client = null;
try {
UserGroupInformation current = UserGroupInformation.getCurrentUser();
UserGroupInformation proxyUserUgi =
UserGroupInformation.createProxyUserForTesting(
MINI_USER, current, GROUP_NAMES);
try {
client = proxyUserUgi.doAs(new PrivilegedExceptionAction<MiniProtocol>() {
@Override
public MiniProtocol run() throws IOException {
MiniProtocol p = RPC.getProxy(MiniProtocol.class,
MiniProtocol.versionID, addr, conf);
Token<TestDelegationTokenIdentifier> token;
token = p.getDelegationToken(new Text(RENEWER));
currentUgi = UserGroupInformation.createUserForTesting(MINI_USER,
GROUP_NAMES);
SecurityUtil.setTokenService(token, addr);
currentUgi.addToken(token);
return p;
}
});
} catch (InterruptedException e) {
Assert.fail(Arrays.toString(e.getStackTrace()));
}
} finally {
RPC.stopProxy(client);
}
}
long connectToServerUsingDelegationToken(
final Configuration conf, final InetSocketAddress addr) throws IOException {
MiniProtocol client = null;
try {
long start = Time.now();
try {
client = currentUgi.doAs(new PrivilegedExceptionAction<MiniProtocol>() {
@Override
public MiniProtocol run() throws IOException {
return RPC.getProxy(MiniProtocol.class,
MiniProtocol.versionID, addr, conf);
}
});
} catch (InterruptedException e) {
e.printStackTrace();
}
long end = Time.now();
return end - start;
} finally {
RPC.stopProxy(client);
}
}
static void setLoggingLevel(Level level) {
LogManager.getLogger(Server.class.getName()).setLevel(level);
((Log4JLogger)Server.AUDITLOG).getLogger().setLevel(level);
LogManager.getLogger(Client.class.getName()).setLevel(level);
}
/**
* Run MiniBenchmark with MiniServer as the RPC server.
*
* @param conf - configuration
* @param count - connect this many times
* @param keytabKey - key for keytab file in the configuration
* @param userNameKey - key for user name in the configuration
* @return average time to connect
* @throws IOException
*/
long runMiniBenchmark(Configuration conf,
int count,
String keytabKey,
String userNameKey) throws IOException {
// get login information
String user = System.getProperty("user.name");
if(userNameKey != null)
user = conf.get(userNameKey, user);
String keytabFile = null;
if(keytabKey != null)
keytabFile = conf.get(keytabKey, keytabFile);
MiniServer miniServer = null;
try {
// start the server
miniServer = new MiniServer(conf, user, keytabFile);
InetSocketAddress addr = miniServer.getAddress();
connectToServer(conf, addr);
// connect to the server count times
setLoggingLevel(logLevel);
long elapsed = 0L;
for(int idx = 0; idx < count; idx ++) {
elapsed += connectToServer(conf, addr);
}
return elapsed;
} finally {
if(miniServer != null) miniServer.stop();
}
}
/**
* Run MiniBenchmark using delegation token authentication.
*
* @param conf - configuration
* @param count - connect this many times
* @param keytabKey - key for keytab file in the configuration
* @param userNameKey - key for user name in the configuration
* @return average time to connect
* @throws IOException
*/
long runMiniBenchmarkWithDelegationToken(Configuration conf,
int count,
String keytabKey,
String userNameKey)
throws IOException {
// get login information
String user = System.getProperty("user.name");
if(userNameKey != null)
user = conf.get(userNameKey, user);
String keytabFile = null;
if(keytabKey != null)
keytabFile = conf.get(keytabKey, keytabFile);
MiniServer miniServer = null;
UserGroupInformation.setConfiguration(conf);
String shortUserName =
UserGroupInformation.createRemoteUser(user).getShortUserName();
try {
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(shortUserName), GROUP_NAME_1);
configureSuperUserIPAddresses(conf, shortUserName);
// start the server
miniServer = new MiniServer(conf, user, keytabFile);
InetSocketAddress addr = miniServer.getAddress();
connectToServerAndGetDelegationToken(conf, addr);
// connect to the server count times
setLoggingLevel(logLevel);
long elapsed = 0L;
for(int idx = 0; idx < count; idx ++) {
elapsed += connectToServerUsingDelegationToken(conf, addr);
}
return elapsed;
} finally {
if(miniServer != null) miniServer.stop();
}
}
static void printUsage() {
System.err.println(
"Usage: MiniRPCBenchmark <numIterations> [<keytabFile> [<userName> " +
"[useToken|useKerberos [<logLevel>]]]]");
System.exit(-1);
}
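  // Example invocations (editor's illustration; the keytab path and the
  // principal below are hypothetical):
  //   java MiniRPCBenchmark 100
  //   java MiniRPCBenchmark 100 /etc/security/bench.keytab \
  //       rpcbench/host.example.com@EXAMPLE.COM useToken DEBUG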
public static void main(String[] args) throws Exception {
System.out.println("Benchmark: RPC session establishment.");
if(args.length < 1)
printUsage();
Configuration conf = new Configuration();
int count = Integer.parseInt(args[0]);
if(args.length > 1)
conf.set(KEYTAB_FILE_KEY, args[1]);
if(args.length > 2)
conf.set(USER_NAME_KEY, args[2]);
boolean useDelegationToken = false;
if(args.length > 3)
useDelegationToken = args[3].equalsIgnoreCase("useToken");
Level l = Level.ERROR;
if(args.length > 4)
l = Level.toLevel(args[4]);
MiniRPCBenchmark mb = new MiniRPCBenchmark(l);
long elapsedTime = 0;
if(useDelegationToken) {
System.out.println(
"Running MiniRPCBenchmark with delegation token authentication.");
elapsedTime = mb.runMiniBenchmarkWithDelegationToken(
conf, count, KEYTAB_FILE_KEY, USER_NAME_KEY);
} else {
String auth = SecurityUtil.getAuthenticationMethod(conf).toString();
System.out.println(
"Running MiniRPCBenchmark with " + auth + " authentication.");
elapsedTime = mb.runMiniBenchmark(
conf, count, KEYTAB_FILE_KEY, USER_NAME_KEY);
}
System.out.println(org.apache.hadoop.util.VersionInfo.getVersion());
System.out.println("Number of connects: " + count);
System.out.println("Average connect time: " + ((double)elapsedTime/count));
}
private void configureSuperUserIPAddresses(Configuration conf,
String superUserShortName) throws IOException {
ArrayList<String> ipList = new ArrayList<String>();
Enumeration<NetworkInterface> netInterfaceList = NetworkInterface
.getNetworkInterfaces();
while (netInterfaceList.hasMoreElements()) {
NetworkInterface inf = netInterfaceList.nextElement();
Enumeration<InetAddress> addrList = inf.getInetAddresses();
while (addrList.hasMoreElements()) {
InetAddress addr = addrList.nextElement();
ipList.add(addr.getHostAddress());
}
}
StringBuilder builder = new StringBuilder();
for (String ip : ipList) {
builder.append(ip);
builder.append(',');
}
builder.append("127.0.1.1,");
builder.append(InetAddress.getLocalHost().getCanonicalHostName());
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(superUserShortName), builder.toString());
}
}
| 15,752 | 36.686603 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.RPC.Server;
import org.apache.hadoop.ipc.TestProtoBufRpc.PBServerImpl;
import org.apache.hadoop.ipc.TestProtoBufRpc.TestRpcService;
import org.apache.hadoop.ipc.TestRPC.TestProtocol;
import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.MultithreadedTestUtil;
import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Joiner;
import com.google.protobuf.BlockingService;
/**
 * Benchmark for Hadoop RPC (writable or protobuf engine).
* Run with --help option for usage.
*/
public class RPCCallBenchmark implements Tool {
private Configuration conf;
private AtomicLong callCount = new AtomicLong(0);
private static ThreadMXBean threadBean =
ManagementFactory.getThreadMXBean();
private static class MyOptions {
private boolean failed = false;
private int serverThreads = 0;
private int serverReaderThreads = 1;
private int clientThreads = 0;
private String host = "0.0.0.0";
private int port = 0;
public int secondsToRun = 15;
private int msgSize = 1024;
public Class<? extends RpcEngine> rpcEngine =
WritableRpcEngine.class;
private MyOptions(String args[]) {
try {
Options opts = buildOptions();
CommandLineParser parser = new GnuParser();
CommandLine line = parser.parse(opts, args, true);
processOptions(line, opts);
validateOptions();
} catch (ParseException e) {
System.err.println(e.getMessage());
System.err.println("Try \"--help\" option for details.");
failed = true;
}
}
private void validateOptions() throws ParseException {
if (serverThreads <= 0 && clientThreads <= 0) {
throw new ParseException("Must specify at least -c or -s");
}
}
@SuppressWarnings("static-access")
private Options buildOptions() {
Options opts = new Options();
opts.addOption(
OptionBuilder.withLongOpt("serverThreads").hasArg(true)
.withArgName("numthreads")
.withDescription("number of server threads (handlers) to run (or 0 to not run server)")
.create("s"));
opts.addOption(
OptionBuilder.withLongOpt("serverReaderThreads").hasArg(true)
.withArgName("threads")
.withDescription("number of server reader threads to run")
.create("r"));
opts.addOption(
OptionBuilder.withLongOpt("clientThreads").hasArg(true)
.withArgName("numthreads")
.withDescription("number of client threads to run (or 0 to not run client)")
.create("c"));
opts.addOption(
OptionBuilder.withLongOpt("messageSize").hasArg(true)
.withArgName("bytes")
.withDescription("size of call parameter in bytes")
.create("m"));
opts.addOption(
OptionBuilder.withLongOpt("time").hasArg(true)
.withArgName("seconds")
.withDescription("number of seconds to run clients for")
.create("t"));
opts.addOption(
OptionBuilder.withLongOpt("port").hasArg(true)
.withArgName("port")
.withDescription("port to listen or connect on")
.create("p"));
opts.addOption(
OptionBuilder.withLongOpt("host").hasArg(true)
.withArgName("addr")
.withDescription("host to listen or connect on")
.create('h'));
opts.addOption(
OptionBuilder.withLongOpt("engine").hasArg(true)
.withArgName("writable|protobuf")
.withDescription("engine to use")
.create('e'));
opts.addOption(
OptionBuilder.withLongOpt("help").hasArg(false)
.withDescription("show this screen")
.create('?'));
return opts;
}
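    // Example invocations (editor's illustration):
    //   combined server+client, protobuf engine, 10 seconds:
    //     ... RPCCallBenchmark -s 4 -r 2 -c 8 -t 10 -m 1024 -e protobuf
    //   client only, pointed at a remote server (host name is hypothetical):
    //     ... RPCCallBenchmark -c 8 -h bench1.example.com -p 12345 -e writable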
private void processOptions(CommandLine line, Options opts)
throws ParseException {
if (line.hasOption("help") || line.hasOption('?')) {
HelpFormatter formatter = new HelpFormatter();
System.out.println("Protobuf IPC benchmark.");
System.out.println();
formatter.printHelp(100,
"java ... PBRPCBenchmark [options]",
"\nSupported options:", opts, "");
return;
}
if (line.hasOption('s')) {
serverThreads = Integer.parseInt(line.getOptionValue('s'));
}
if (line.hasOption('r')) {
serverReaderThreads = Integer.parseInt(line.getOptionValue('r'));
}
if (line.hasOption('c')) {
clientThreads = Integer.parseInt(line.getOptionValue('c'));
}
if (line.hasOption('t')) {
secondsToRun = Integer.parseInt(line.getOptionValue('t'));
}
if (line.hasOption('m')) {
msgSize = Integer.parseInt(line.getOptionValue('m'));
}
if (line.hasOption('p')) {
port = Integer.parseInt(line.getOptionValue('p'));
}
if (line.hasOption('h')) {
host = line.getOptionValue('h');
}
if (line.hasOption('e')) {
String eng = line.getOptionValue('e');
if ("protobuf".equals(eng)) {
rpcEngine = ProtobufRpcEngine.class;
} else if ("writable".equals(eng)) {
rpcEngine = WritableRpcEngine.class;
} else {
throw new ParseException("invalid engine: " + eng);
}
}
String[] remainingArgs = line.getArgs();
if (remainingArgs.length != 0) {
throw new ParseException("Extra arguments: " +
Joiner.on(" ").join(remainingArgs));
}
}
public int getPort() {
if (port == 0) {
port = NetUtils.getFreeSocketPort();
if (port == 0) {
throw new RuntimeException("Could not find a free port");
}
}
return port;
}
@Override
public String toString() {
return "rpcEngine=" + rpcEngine + "\nserverThreads=" + serverThreads
+ "\nserverReaderThreads=" + serverReaderThreads + "\nclientThreads="
+ clientThreads + "\nhost=" + host + "\nport=" + getPort()
+ "\nsecondsToRun=" + secondsToRun + "\nmsgSize=" + msgSize;
}
}
private Server startServer(MyOptions opts) throws IOException {
if (opts.serverThreads <= 0) {
return null;
}
conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY,
opts.serverReaderThreads);
RPC.Server server;
// Get RPC server for server side implementation
if (opts.rpcEngine == ProtobufRpcEngine.class) {
// Create server side implementation
PBServerImpl serverImpl = new PBServerImpl();
BlockingService service = TestProtobufRpcProto
.newReflectiveBlockingService(serverImpl);
server = new RPC.Builder(conf).setProtocol(TestRpcService.class)
.setInstance(service).setBindAddress(opts.host).setPort(opts.getPort())
.setNumHandlers(opts.serverThreads).setVerbose(false).build();
} else if (opts.rpcEngine == WritableRpcEngine.class) {
server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestRPC.TestImpl()).setBindAddress(opts.host)
.setPort(opts.getPort()).setNumHandlers(opts.serverThreads)
.setVerbose(false).build();
} else {
throw new RuntimeException("Bad engine: " + opts.rpcEngine);
}
server.start();
return server;
}
private long getTotalCpuTime(Iterable<? extends Thread> threads) {
long total = 0;
for (Thread t : threads) {
long tid = t.getId();
total += threadBean.getThreadCpuTime(tid);
}
return total;
}
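  /**
   * Editor's sketch (not invoked by the benchmark): on some JVMs per-thread
   * CPU accounting must be switched on before getThreadCpuTime() returns
   * real values rather than -1.
   */
  private static void ensureThreadCpuTimeEnabled() {
    if (threadBean.isThreadCpuTimeSupported()
        && !threadBean.isThreadCpuTimeEnabled()) {
      threadBean.setThreadCpuTimeEnabled(true);
    }
  }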
@Override
public int run(String[] args) throws Exception {
MyOptions opts = new MyOptions(args);
if (opts.failed) {
return -1;
}
// Set RPC engine to the configured RPC engine
RPC.setProtocolEngine(conf, TestRpcService.class, opts.rpcEngine);
Server server = startServer(opts);
try {
TestContext ctx = setupClientTestContext(opts);
if (ctx != null) {
long totalCalls = 0;
ctx.startThreads();
long veryStart = System.nanoTime();
// Loop printing results every second until the specified
// time has elapsed
for (int i = 0; i < opts.secondsToRun ; i++) {
long st = System.nanoTime();
ctx.waitFor(1000);
long et = System.nanoTime();
long ct = callCount.getAndSet(0);
totalCalls += ct;
          double callsPerSec = (ct * 1000000000.0) / (et - st);
System.out.println("Calls per second: " + callsPerSec);
}
// Print results
if (totalCalls > 0) {
long veryEnd = System.nanoTime();
          double callsPerSec =
              (totalCalls * 1000000000.0) / (veryEnd - veryStart);
long cpuNanosClient = getTotalCpuTime(ctx.getTestThreads());
long cpuNanosServer = -1;
if (server != null) {
            cpuNanosServer = getTotalCpuTime(server.getHandlers());
}
System.out.println("====== Results ======");
System.out.println("Options:\n" + opts);
System.out.println("Total calls per second: " + callsPerSec);
System.out.println("CPU time per call on client: " +
(cpuNanosClient / totalCalls) + " ns");
if (server != null) {
System.out.println("CPU time per call on server: " +
(cpuNanosServer / totalCalls) + " ns");
}
} else {
System.out.println("No calls!");
}
ctx.stop();
} else {
while (true) {
Thread.sleep(10000);
}
}
} finally {
if (server != null) {
server.stop();
}
}
return 0;
}
private TestContext setupClientTestContext(final MyOptions opts)
throws IOException, InterruptedException {
if (opts.clientThreads <= 0) {
return null;
}
// Set up a separate proxy for each client thread,
// rather than making them share TCP pipes.
int numProxies = opts.clientThreads;
final RpcServiceWrapper proxies[] = new RpcServiceWrapper[numProxies];
for (int i = 0; i < numProxies; i++) {
proxies[i] =
UserGroupInformation.createUserForTesting("proxy-" + i,new String[]{})
.doAs(new PrivilegedExceptionAction<RpcServiceWrapper>() {
@Override
public RpcServiceWrapper run() throws Exception {
return createRpcClient(opts);
}
});
}
// Create an echo message of the desired length
final StringBuilder msgBuilder = new StringBuilder(opts.msgSize);
for (int c = 0; c < opts.msgSize; c++) {
msgBuilder.append('x');
}
final String echoMessage = msgBuilder.toString();
// Create the clients in a test context
TestContext ctx = new TestContext();
for (int i = 0; i < opts.clientThreads; i++) {
final RpcServiceWrapper proxy = proxies[i % numProxies];
ctx.addThread(new MultithreadedTestUtil.RepeatingTestThread(ctx) {
@Override
public void doAnAction() throws Exception {
proxy.doEcho(echoMessage);
callCount.incrementAndGet();
}
});
}
return ctx;
}
/**
* Simple interface that can be implemented either by the
* protobuf or writable implementations.
*/
private interface RpcServiceWrapper {
public String doEcho(String msg) throws Exception;
}
/**
* Create a client proxy for the specified engine.
*/
private RpcServiceWrapper createRpcClient(MyOptions opts) throws IOException {
InetSocketAddress addr = NetUtils.createSocketAddr(opts.host, opts.getPort());
if (opts.rpcEngine == ProtobufRpcEngine.class) {
final TestRpcService proxy = RPC.getProxy(TestRpcService.class, 0, addr, conf);
return new RpcServiceWrapper() {
@Override
public String doEcho(String msg) throws Exception {
EchoRequestProto req = EchoRequestProto.newBuilder()
.setMessage(msg)
.build();
EchoResponseProto responseProto = proxy.echo(null, req);
return responseProto.getMessage();
}
};
} else if (opts.rpcEngine == WritableRpcEngine.class) {
final TestProtocol proxy = RPC.getProxy(
TestProtocol.class, TestProtocol.versionID, addr, conf);
return new RpcServiceWrapper() {
@Override
public String doEcho(String msg) throws Exception {
return proxy.echo(msg);
}
};
} else {
throw new RuntimeException("unsupported engine: " + opts.rpcEngine);
}
}
public static void main(String []args) throws Exception {
int rc = ToolRunner.run(new RPCCallBenchmark(), args);
System.exit(rc);
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
@Override
public Configuration getConf() {
return conf;
}
}
| 14,735 | 33.032333 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import static org.junit.Assert.assertEquals;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import org.junit.Test;
public class TestCallQueueManager {
private CallQueueManager<FakeCall> manager;
public class FakeCall {
public final int tag; // Can be used for unique identification
public FakeCall(int tag) {
this.tag = tag;
}
}
/**
* Putter produces FakeCalls
*/
public class Putter implements Runnable {
private final CallQueueManager<FakeCall> cq;
public final int tag;
public volatile int callsAdded = 0; // How many calls we added, accurate unless interrupted
private final int maxCalls;
private volatile boolean isRunning = true;
public Putter(CallQueueManager<FakeCall> aCq, int maxCalls, int tag) {
this.maxCalls = maxCalls;
this.cq = aCq;
this.tag = tag;
}
public void run() {
try {
// Fill up to max (which is infinite if maxCalls < 0)
while (isRunning && (callsAdded < maxCalls || maxCalls < 0)) {
cq.put(new FakeCall(this.tag));
callsAdded++;
}
} catch (InterruptedException e) {
return;
}
}
public void stop() {
this.isRunning = false;
}
}
/**
* Taker consumes FakeCalls
*/
public class Taker implements Runnable {
private final CallQueueManager<FakeCall> cq;
public final int tag; // if >= 0 means we will only take the matching tag, and put back
// anything else
public volatile int callsTaken = 0; // total calls taken, accurate if we aren't interrupted
public volatile FakeCall lastResult = null; // the last thing we took
private final int maxCalls; // maximum calls to take
public Taker(CallQueueManager<FakeCall> aCq, int maxCalls, int tag) {
this.maxCalls = maxCalls;
this.cq = aCq;
this.tag = tag;
}
public void run() {
try {
// Take while we don't exceed maxCalls, or if maxCalls is undefined (< 0)
while (callsTaken < maxCalls || maxCalls < 0) {
FakeCall res = cq.take();
if (tag >= 0 && res.tag != this.tag) {
// This call does not match our tag, we should put it back and try again
cq.put(res);
} else {
callsTaken++;
lastResult = res;
}
}
} catch (InterruptedException e) {
return;
}
}
}
// Assert we can take exactly the numberOfTakes
public void assertCanTake(CallQueueManager<FakeCall> cq, int numberOfTakes,
int takeAttempts) throws InterruptedException {
Taker taker = new Taker(cq, takeAttempts, -1);
Thread t = new Thread(taker);
t.start();
t.join(100);
    assertEquals(numberOfTakes, taker.callsTaken);
t.interrupt();
}
// Assert we can put exactly the numberOfPuts
public void assertCanPut(CallQueueManager<FakeCall> cq, int numberOfPuts,
int putAttempts) throws InterruptedException {
Putter putter = new Putter(cq, putAttempts, -1);
Thread t = new Thread(putter);
t.start();
t.join(100);
    assertEquals(numberOfPuts, putter.callsAdded);
t.interrupt();
}
private static final Class<? extends BlockingQueue<FakeCall>> queueClass
= CallQueueManager.convertQueueClass(LinkedBlockingQueue.class, FakeCall.class);
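  // Editor's note: the conversion is a typed cast helper -- generics are
  // erased at runtime, so LinkedBlockingQueue.class cannot be written as a
  // Class<? extends BlockingQueue<FakeCall>> directly; convertQueueClass
  // re-types it so CallQueueManager can instantiate the queue reflectively.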
@Test
public void testCallQueueCapacity() throws InterruptedException {
manager = new CallQueueManager<FakeCall>(queueClass, false, 10, "", null);
assertCanPut(manager, 10, 20); // Will stop at 10 due to capacity
}
@Test
public void testEmptyConsume() throws InterruptedException {
manager = new CallQueueManager<FakeCall>(queueClass, false, 10, "", null);
assertCanTake(manager, 0, 1); // Fails since it's empty
}
@Test(timeout=60000)
public void testSwapUnderContention() throws InterruptedException {
manager = new CallQueueManager<FakeCall>(queueClass, false, 5000, "", null);
ArrayList<Putter> producers = new ArrayList<Putter>();
ArrayList<Taker> consumers = new ArrayList<Taker>();
HashMap<Runnable, Thread> threads = new HashMap<Runnable, Thread>();
// Create putters and takers
for (int i=0; i < 1000; i++) {
Putter p = new Putter(manager, -1, -1);
Thread pt = new Thread(p);
producers.add(p);
threads.put(p, pt);
pt.start();
}
for (int i=0; i < 100; i++) {
Taker t = new Taker(manager, -1, -1);
Thread tt = new Thread(t);
consumers.add(t);
threads.put(t, tt);
tt.start();
}
Thread.sleep(500);
for (int i=0; i < 5; i++) {
manager.swapQueue(queueClass, 5000, "", null);
}
// Stop the producers
for (Putter p : producers) {
p.stop();
}
// Wait for consumers to wake up, then consume
Thread.sleep(2000);
assertEquals(0, manager.size());
// Ensure no calls were dropped
long totalCallsCreated = 0;
for (Putter p : producers) {
threads.get(p).interrupt();
}
for (Putter p : producers) {
threads.get(p).join();
totalCallsCreated += p.callsAdded;
}
long totalCallsConsumed = 0;
for (Taker t : consumers) {
threads.get(t).interrupt();
}
for (Taker t : consumers) {
threads.get(t).join();
totalCallsConsumed += t.callsTaken;
}
    assertEquals(totalCallsCreated, totalCallsConsumed);
}
}
| 6,351 | 27.612613 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCallBenchmark.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import static org.junit.Assert.*;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Test;
public class TestRPCCallBenchmark {
@Test(timeout=20000)
public void testBenchmarkWithWritable() throws Exception {
int rc = ToolRunner.run(new RPCCallBenchmark(),
new String[] {
"--clientThreads", "30",
"--serverThreads", "30",
"--time", "5",
"--serverReaderThreads", "4",
"--messageSize", "1024",
"--engine", "writable"});
assertEquals(0, rc);
}
@Test(timeout=20000)
public void testBenchmarkWithProto() throws Exception {
int rc = ToolRunner.run(new RPCCallBenchmark(),
new String[] {
"--clientThreads", "30",
"--serverThreads", "30",
"--time", "5",
"--serverReaderThreads", "4",
"--messageSize", "1024",
"--engine", "protobuf"});
assertEquals(0, rc);
}
}
| 1,720 | 30.87037 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.TestProtoBufRpc.PBServerImpl;
import org.apache.hadoop.ipc.TestProtoBufRpc.TestRpcService;
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
import org.apache.hadoop.net.NetUtils;
import org.junit.Before;
import org.junit.After;
import org.junit.Test;
import com.google.protobuf.BlockingService;
public class TestMultipleProtocolServer {
private static final String ADDRESS = "0.0.0.0";
private static InetSocketAddress addr;
private static RPC.Server server;
private static Configuration conf = new Configuration();
@ProtocolInfo(protocolName="Foo")
interface Foo0 extends VersionedProtocol {
public static final long versionID = 0L;
String ping() throws IOException;
}
@ProtocolInfo(protocolName="Foo")
interface Foo1 extends VersionedProtocol {
public static final long versionID = 1L;
String ping() throws IOException;
String ping2() throws IOException;
}
@ProtocolInfo(protocolName="Foo")
interface FooUnimplemented extends VersionedProtocol {
public static final long versionID = 2L;
String ping() throws IOException;
}
interface Mixin extends VersionedProtocol{
public static final long versionID = 0L;
void hello() throws IOException;
}
interface Bar extends Mixin {
public static final long versionID = 0L;
int echo(int i) throws IOException;
}
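  // Editor's note: the three Foo interfaces above share protocolName="Foo"
  // via @ProtocolInfo, so the RPC layer treats them as one protocol with
  // versions 0..2; Mixin and Bar have no annotation and fall back to their
  // fully-qualified interface names.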
class Foo0Impl implements Foo0 {
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return Foo0.versionID;
}
@SuppressWarnings("unchecked")
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
Class<? extends VersionedProtocol> inter;
try {
inter = (Class<? extends VersionedProtocol>)getClass().
getGenericInterfaces()[0];
} catch (Exception e) {
throw new IOException(e);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
getProtocolVersion(protocol, clientVersion), inter);
}
@Override
public String ping() {
return "Foo0";
}
}
class Foo1Impl implements Foo1 {
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return Foo1.versionID;
}
@SuppressWarnings("unchecked")
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
Class<? extends VersionedProtocol> inter;
try {
inter = (Class<? extends VersionedProtocol>)getClass().
getGenericInterfaces()[0];
} catch (Exception e) {
throw new IOException(e);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
getProtocolVersion(protocol, clientVersion), inter);
}
@Override
public String ping() {
return "Foo1";
}
@Override
public String ping2() {
return "Foo1";
}
}
class BarImpl implements Bar {
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return Bar.versionID;
}
@SuppressWarnings("unchecked")
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
Class<? extends VersionedProtocol> inter;
try {
inter = (Class<? extends VersionedProtocol>)getClass().
getGenericInterfaces()[0];
} catch (Exception e) {
throw new IOException(e);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
getProtocolVersion(protocol, clientVersion), inter);
}
@Override
public int echo(int i) {
return i;
}
@Override
public void hello() {
}
}
@Before
public void setUp() throws Exception {
// create a server with two handlers
server = new RPC.Builder(conf).setProtocol(Foo0.class)
.setInstance(new Foo0Impl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
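    // Stack additional Writable protocols onto the same server and port;
    // clients pick one by protocol name, version, and RPC kind.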
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Foo1.class, new Foo1Impl());
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Bar.class, new BarImpl());
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Mixin.class, new BarImpl());
// Add Protobuf server
// Create server side implementation
PBServerImpl pbServerImpl =
new PBServerImpl();
BlockingService service = TestProtobufRpcProto
.newReflectiveBlockingService(pbServerImpl);
server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService.class,
service);
server.start();
addr = NetUtils.getConnectAddress(server);
}
@After
public void tearDown() throws Exception {
server.stop();
}
@Test
public void test1() throws IOException {
ProtocolProxy<?> proxy;
proxy = RPC.getProtocolProxy(Foo0.class, Foo0.versionID, addr, conf);
Foo0 foo0 = (Foo0)proxy.getProxy();
Assert.assertEquals("Foo0", foo0.ping());
proxy = RPC.getProtocolProxy(Foo1.class, Foo1.versionID, addr, conf);
Foo1 foo1 = (Foo1)proxy.getProxy();
Assert.assertEquals("Foo1", foo1.ping());
Assert.assertEquals("Foo1", foo1.ping());
proxy = RPC.getProtocolProxy(Bar.class, Foo1.versionID, addr, conf);
Bar bar = (Bar)proxy.getProxy();
Assert.assertEquals(99, bar.echo(99));
// Now test a method inherited from the Mixin interface
Mixin mixin = bar;
mixin.hello();
}
// The server does not implement the FooUnimplemented version of protocol
// Foo. Verify that calls to it fail.
@Test(expected=IOException.class)
public void testNonExistingProtocol() throws IOException {
ProtocolProxy<?> proxy;
proxy = RPC.getProtocolProxy(FooUnimplemented.class,
FooUnimplemented.versionID, addr, conf);
FooUnimplemented foo = (FooUnimplemented)proxy.getProxy();
foo.ping();
}
/**
 * getProtocolVersion of an unimplemented version should return the
 * highest implemented version of that protocol; getProtocolSignature
 * should likewise succeed.
 * @throws IOException
 */
@Test
public void testNonExistingProtocol2() throws IOException {
ProtocolProxy<?> proxy;
proxy = RPC.getProtocolProxy(FooUnimplemented.class,
FooUnimplemented.versionID, addr, conf);
FooUnimplemented foo = (FooUnimplemented)proxy.getProxy();
Assert.assertEquals(Foo1.versionID,
foo.getProtocolVersion(RPC.getProtocolName(FooUnimplemented.class),
FooUnimplemented.versionID));
foo.getProtocolSignature(RPC.getProtocolName(FooUnimplemented.class),
FooUnimplemented.versionID, 0);
}
@Test(expected=IOException.class)
public void testIncorrectServerCreation() throws IOException {
new RPC.Builder(conf).setProtocol(Foo1.class).setInstance(new Foo0Impl())
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false)
.build();
}
// Now test a PB service - a server hosts both PB and Writable Rpcs.
@Test
public void testPBService() throws Exception {
// Set RPC engine to protobuf RPC engine
Configuration conf2 = new Configuration();
RPC.setProtocolEngine(conf2, TestRpcService.class,
ProtobufRpcEngine.class);
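    // The same server address now answers protobuf RPC alongside the
    // Writable protocols registered in setUp().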
TestRpcService client = RPC.getProxy(TestRpcService.class, 0, addr, conf2);
TestProtoBufRpc.testProtoBufRpc(client);
}
}
| 8,719 | 29.812721 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import static org.junit.Assert.assertSame;
import java.util.HashMap;
import java.util.Map;
import javax.net.SocketFactory;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.StandardSocketFactory;
import org.junit.Test;
public class TestSocketFactory {
@Test
public void testSocketFactoryAsKeyInMap() {
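    // The RPC client caches connections keyed in part by SocketFactory, so
    // factories must behave sanely as hash-map keys; this map stands in
    // for that cache.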
Map<SocketFactory, Integer> dummyCache = new HashMap<SocketFactory, Integer>();
int toBeCached1 = 1;
int toBeCached2 = 2;
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
"org.apache.hadoop.ipc.TestSocketFactory$DummySocketFactory");
final SocketFactory dummySocketFactory = NetUtils
.getDefaultSocketFactory(conf);
dummyCache.put(dummySocketFactory, toBeCached1);
conf.set(CommonConfigurationKeys.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
"org.apache.hadoop.net.StandardSocketFactory");
final SocketFactory defaultSocketFactory = NetUtils
.getDefaultSocketFactory(conf);
dummyCache.put(defaultSocketFactory, toBeCached2);
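    // StandardSocketFactory compares factories by class, so the two
    // factories above must land in separate map entries.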
Assert.assertEquals("The cache should contain two elements",
2, dummyCache.size());
Assert.assertFalse("The two socket factories should not be equal",
defaultSocketFactory.equals(dummySocketFactory));
assertSame(toBeCached2, dummyCache.remove(defaultSocketFactory));
dummyCache.put(defaultSocketFactory, toBeCached2);
assertSame(toBeCached1, dummyCache.remove(dummySocketFactory));
}
/**
* A dummy socket factory class that extends the StandardSocketFactory.
*/
static class DummySocketFactory extends StandardSocketFactory {
}
}
| 2,658 | 35.424658 | 83 |
java
|