Dataset schema: repo (string, 1–191 chars) | file (string, 23–351 chars) | code (string, 0–5.32M chars) | file_length (int64, 0–5.32M) | avg_line_length (float64, 0–2.9k) | max_line_length (int64, 0–288k) | extension_type (string, 1 class)
repo: hadoop | file: hadoop-master/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.TreeMap;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.Parser;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.HarFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SequenceFileRecordReader;
import org.apache.hadoop.mapred.lib.NullOutputFormat;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.JobSubmissionFiles;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Charsets;
/**
 * An archive creation utility.
 * This class provides methods that can be used
 * to create hadoop archives. For an understanding of
 * Hadoop archives, look at {@link HarFileSystem}.
 */
public class HadoopArchives implements Tool {
public static final int VERSION = 3;
private static final Log LOG = LogFactory.getLog(HadoopArchives.class);
private static final String NAME = "har";
private static final String ARCHIVE_NAME = "archiveName";
private static final String REPLICATION = "r";
private static final String PARENT_PATH = "p";
private static final String HELP = "help";
static final String SRC_LIST_LABEL = NAME + ".src.list";
static final String DST_DIR_LABEL = NAME + ".dest.path";
static final String TMP_DIR_LABEL = NAME + ".tmp.dir";
static final String JOB_DIR_LABEL = NAME + ".job.dir";
static final String SRC_COUNT_LABEL = NAME + ".src.count";
static final String TOTAL_SIZE_LABEL = NAME + ".total.size";
static final String DST_HAR_LABEL = NAME + ".archive.name";
static final String SRC_PARENT_LABEL = NAME + ".parent.path";
/** the size of the blocks that will be created when archiving **/
static final String HAR_BLOCKSIZE_LABEL = NAME + ".block.size";
/** the replication factor for the files in the archive **/
static final String HAR_REPLICATION_LABEL = NAME + ".replication.factor";
/** the size of the part files that will be created when archiving **/
static final String HAR_PARTSIZE_LABEL = NAME + ".partfile.size";
/** default size of each part file: 2GB **/
long partSize = 2 * 1024 * 1024 * 1024L;
/** default size of blocks in hadoop archives: 512MB **/
long blockSize = 512 * 1024 * 1024L;
/** the desired replication degree; default is 3 **/
short repl = 3;
private static final String usage = "archive"
+ " <-archiveName <NAME>.har> <-p <parent path>> [-r <replication factor>]" +
" <src>* <dest>" +
"\n";
private JobConf conf;
public void setConf(Configuration conf) {
if (conf instanceof JobConf) {
this.conf = (JobConf) conf;
} else {
this.conf = new JobConf(conf, HadoopArchives.class);
}
// This is for test purposes. Since MR2, unlike Streaming, there is no
// way to add a JAR to the classpath that the tool will use when running
// the mapreduce job, so tests can inject one via a system property.
String testJar = System.getProperty(TEST_HADOOP_ARCHIVES_JAR_PATH, null);
if (testJar != null) {
this.conf.setJar(testJar);
}
}
public Configuration getConf() {
return this.conf;
}
public HadoopArchives(Configuration conf) {
setConf(conf);
}
// check the src paths
private static void checkPaths(Configuration conf, List<Path> paths) throws
IOException {
for (Path p : paths) {
FileSystem fs = p.getFileSystem(conf);
if (!fs.exists(p)) {
throw new FileNotFoundException("Source " + p + " does not exist.");
}
}
}
/**
 * recursively lists a path, assuming each entry is either a file or a dir
 * @param fs the input filesystem
 * @param fdir the FileStatusDir of the path
 * @param out the output list of paths from the recursive ls
 * @throws IOException
 */
private void recursivels(FileSystem fs, FileStatusDir fdir, List<FileStatusDir> out)
throws IOException {
if (fdir.getFileStatus().isFile()) {
out.add(fdir);
return;
}
else {
out.add(fdir);
FileStatus[] listStatus = fs.listStatus(fdir.getFileStatus().getPath());
fdir.setChildren(listStatus);
for (FileStatus stat: listStatus) {
FileStatusDir fstatDir = new FileStatusDir(stat, null);
recursivels(fs, fstatDir, out);
}
}
}
/** HarEntry is used in the {@link HArchivesMapper} as the input value. */
private static class HarEntry implements Writable {
String path;
String[] children;
HarEntry() {}
HarEntry(String path, String[] children) {
this.path = path;
this.children = children;
}
boolean isDir() {
return children != null;
}
@Override
public void readFields(DataInput in) throws IOException {
path = Text.readString(in);
if (in.readBoolean()) {
children = new String[in.readInt()];
for(int i = 0; i < children.length; i++) {
children[i] = Text.readString(in);
}
} else {
children = null;
}
}
@Override
public void write(DataOutput out) throws IOException {
Text.writeString(out, path);
final boolean dir = isDir();
out.writeBoolean(dir);
if (dir) {
out.writeInt(children.length);
for(String c : children) {
Text.writeString(out, c);
}
}
}
}
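/* A minimal round-trip sketch of the Writable contract above (an
 * illustrative sketch; HarEntry is private, so such code would have to
 * live inside this class):
 *
 *   HarEntry dir = new HarEntry("/source", new String[]{"dir1", "dir2"});
 *   ByteArrayOutputStream bytes = new ByteArrayOutputStream();
 *   dir.write(new DataOutputStream(bytes));   // path, true, 2, "dir1", "dir2"
 *   HarEntry copy = new HarEntry();
 *   copy.readFields(new DataInputStream(
 *       new ByteArrayInputStream(bytes.toByteArray())));
 *   // now copy.isDir() == true and copy.children = {"dir1", "dir2"}
 */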
/**
* Input format of a hadoop archive job responsible for
* generating splits of the file list
*/
static class HArchiveInputFormat implements InputFormat<LongWritable, HarEntry> {
//generate input splits from the src file lists
public InputSplit[] getSplits(JobConf jconf, int numSplits)
throws IOException {
String srcfilelist = jconf.get(SRC_LIST_LABEL, "");
if ("".equals(srcfilelist)) {
throw new IOException("Unable to get the " +
"src file for archive generation.");
}
long totalSize = jconf.getLong(TOTAL_SIZE_LABEL, -1);
if (totalSize == -1) {
throw new IOException("Invalid size of files to archive");
}
//we should be safe since this is set by our own code
Path src = new Path(srcfilelist);
FileSystem fs = src.getFileSystem(jconf);
FileStatus fstatus = fs.getFileStatus(src);
ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
LongWritable key = new LongWritable();
final HarEntry value = new HarEntry();
// the remaining bytes in the file split
long remaining = fstatus.getLen();
// the count of sizes accumulated so far
long currentCount = 0L;
// the end position of the split
long lastPos = 0L;
// the start position of the split
long startPos = 0L;
long targetSize = totalSize/numSplits;
// create splits of roughly targetSize so that all the maps
// have equal-sized data to read and write.
try (SequenceFile.Reader reader = new SequenceFile.Reader(fs, src, jconf)) {
while(reader.next(key, value)) {
if (currentCount + key.get() > targetSize && currentCount != 0){
long size = lastPos - startPos;
splits.add(new FileSplit(src, startPos, size, (String[]) null));
remaining = remaining - size;
startPos = lastPos;
currentCount = 0L;
}
currentCount += key.get();
lastPos = reader.getPosition();
}
// whatever remains (less than the target size) becomes the last split.
if (remaining != 0) {
splits.add(new FileSplit(src, startPos, remaining, (String[])null));
}
}
return splits.toArray(new FileSplit[splits.size()]);
}
@Override
public RecordReader<LongWritable, HarEntry> getRecordReader(InputSplit split,
JobConf job, Reporter reporter) throws IOException {
return new SequenceFileRecordReader<LongWritable, HarEntry>(job,
(FileSplit)split);
}
}
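/* Split math sketch (hypothetical numbers): with totalSize = 300MB and
 * numSplits = 3, records are accumulated until their summed lengths pass
 * the 100MB target, then a FileSplit is cut at the last record boundary,
 * so every map copies roughly the same number of archive bytes no matter
 * how many entries it reads.
 */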
private boolean checkValidName(String name) {
Path tmp = new Path(name);
if (tmp.depth() != 1) {
return false;
}
if (name.endsWith(".har"))
return true;
return false;
}
private Path largestDepth(List<Path> paths) {
Path deepest = paths.get(0);
for (Path p: paths) {
if (p.depth() > deepest.depth()) {
deepest = p;
}
}
return deepest;
}
/**
* truncate the prefix root from the full path
* @param fullPath the full path
* @param root the prefix root to be truncated
* @return the relative path
*/
private Path relPathToRoot(Path fullPath, Path root) {
// take some care to build the path component by component
// rather than just using substring,
// so that this does not break later
final Path justRoot = new Path(Path.SEPARATOR);
if (fullPath.depth() == root.depth()) {
return justRoot;
}
else if (fullPath.depth() > root.depth()) {
Path retPath = new Path(fullPath.getName());
Path parent = fullPath.getParent();
for (int i=0; i < (fullPath.depth() - root.depth() -1); i++) {
retPath = new Path(parent.getName(), retPath);
parent = parent.getParent();
}
return new Path(justRoot, retPath);
}
return null;
}
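/* Worked examples for relPathToRoot (hypothetical paths):
 *   relPathToRoot(new Path("/home/user/dir1"), new Path("/home"))
 *       -> "/user/dir1"  (built name by name, from "dir1" upwards)
 *   relPathToRoot(new Path("/home"), new Path("/home"))  -> "/"
 *   relPathToRoot(new Path("/a"), new Path("/a/b"))      -> null
 */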
/**
* this method writes all the valid top level directories
* into the srcWriter for indexing. This method is a little
* tricky. example-
* for an input with parent path /home/user/ and sources
* as /home/user/source/dir1, /home/user/source/dir2 - this
* will output <source, dir, dir1, dir2> (dir means that source is a dir
* with dir1 and dir2 as children) and <source/dir1, file, null>
* and <source/dir2, file, null>
* @param srcWriter the sequence file writer to write the
* directories to
* @param paths the source paths provided by the user. They
* are glob free and have full path (not relative paths)
* @param parentPath the parent path that you want the archives
* to be relative to. example - /home/user/dir1 can be archived with
* parent as /home or /home/user.
* @throws IOException
*/
private void writeTopLevelDirs(SequenceFile.Writer srcWriter,
List<Path> paths, Path parentPath) throws IOException {
// extract paths from absolute URIs
List<Path> justPaths = new ArrayList<Path>();
for (Path p: paths) {
justPaths.add(new Path(p.toUri().getPath()));
}
/* find all the common parents of paths that are valid archive
 * paths. This is done so that we do not add a common path
 * twice, and we only add the valid children of a path that
 * are specified by the user.
 */
TreeMap<String, HashSet<String>> allpaths = new TreeMap<String,
HashSet<String>>();
/* the largest depth of paths. the max number of times
* we need to iterate
*/
Path deepest = largestDepth(paths);
Path root = new Path(Path.SEPARATOR);
for (int i = parentPath.depth(); i < deepest.depth(); i++) {
List<Path> parents = new ArrayList<Path>();
for (Path p: justPaths) {
if (p.compareTo(root) == 0){
//do nothing
}
else {
Path parent = p.getParent();
if (null != parent) {
if (allpaths.containsKey(parent.toString())) {
HashSet<String> children = allpaths.get(parent.toString());
children.add(p.getName());
}
else {
HashSet<String> children = new HashSet<String>();
children.add(p.getName());
allpaths.put(parent.toString(), children);
}
parents.add(parent);
}
}
}
justPaths = parents;
}
Set<Map.Entry<String, HashSet<String>>> keyVals = allpaths.entrySet();
for (Map.Entry<String, HashSet<String>> entry : keyVals) {
final Path relPath = relPathToRoot(new Path(entry.getKey()), parentPath);
if (relPath != null) {
final String[] children = new String[entry.getValue().size()];
int i = 0;
for(String child: entry.getValue()) {
children[i++] = child;
}
append(srcWriter, 0L, relPath.toString(), children);
}
}
}
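/* Trace of the javadoc example above (a sketch of the entries written):
 * with parentPath = /home/user and sources /home/user/source/dir1 and
 * /home/user/source/dir2, the two passes build
 *   allpaths = { /home/user -> {source}, /home/user/source -> {dir1, dir2} }
 * and srcWriter receives two zero-length directory entries:
 *   ("/",       children = ["source"])
 *   ("/source", children = ["dir1", "dir2"])
 */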
private void append(SequenceFile.Writer srcWriter, long len,
String path, String[] children) throws IOException {
srcWriter.append(new LongWritable(len), new HarEntry(path, children));
}
/**
 * A static class that keeps
 * track of the status of a path
 * and its children if the path is a dir
 */
static class FileStatusDir {
private FileStatus fstatus;
private FileStatus[] children = null;
/**
* constructor for filestatusdir
* @param fstatus the filestatus object that maps to filestatusdir
* @param children the children list if fs is a directory
*/
FileStatusDir(FileStatus fstatus, FileStatus[] children) {
this.fstatus = fstatus;
this.children = children;
}
/**
* set children of this object
* @param listStatus the list of children
*/
public void setChildren(FileStatus[] listStatus) {
this.children = listStatus;
}
/**
* the filestatus of this object
* @return the filestatus of this object
*/
FileStatus getFileStatus() {
return this.fstatus;
}
/**
 * the children list of this object, null if not a directory
 * @return the children list
 */
FileStatus[] getChildren() {
return this.children;
}
}
/** archive the given source paths into
 * the dest
 * @param parentPath the parent path of all the source paths
 * @param srcPaths the src paths to be archived
 * @param archiveName the name of the archive to be created
 * @param dest the dest dir that will contain the archive
 */
void archive(Path parentPath, List<Path> srcPaths,
String archiveName, Path dest) throws IOException {
checkPaths(conf, srcPaths);
int numFiles = 0;
long totalSize = 0;
FileSystem fs = parentPath.getFileSystem(conf);
this.blockSize = conf.getLong(HAR_BLOCKSIZE_LABEL, blockSize);
this.partSize = conf.getLong(HAR_PARTSIZE_LABEL, partSize);
conf.setLong(HAR_BLOCKSIZE_LABEL, blockSize);
conf.setLong(HAR_PARTSIZE_LABEL, partSize);
conf.set(DST_HAR_LABEL, archiveName);
conf.set(SRC_PARENT_LABEL, parentPath.makeQualified(fs).toString());
conf.setInt(HAR_REPLICATION_LABEL, repl);
Path outputPath = new Path(dest, archiveName);
FileOutputFormat.setOutputPath(conf, outputPath);
FileSystem outFs = outputPath.getFileSystem(conf);
if (outFs.exists(outputPath)) {
throw new IOException("Archive path: "
+ outputPath.toString() + " already exists");
}
if (outFs.isFile(dest)) {
throw new IOException("Destination " + dest.toString()
+ " should be a directory but is a file");
}
conf.set(DST_DIR_LABEL, outputPath.toString());
Path stagingArea;
try {
stagingArea = JobSubmissionFiles.getStagingDir(new Cluster(conf),
conf);
} catch (InterruptedException ie) {
throw new IOException(ie);
}
Path jobDirectory = new Path(stagingArea,
NAME+"_"+Integer.toString(new Random().nextInt(Integer.MAX_VALUE), 36));
FsPermission mapredSysPerms =
new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
FileSystem.mkdirs(jobDirectory.getFileSystem(conf), jobDirectory,
mapredSysPerms);
conf.set(JOB_DIR_LABEL, jobDirectory.toString());
//get a tmp directory for input splits
FileSystem jobfs = jobDirectory.getFileSystem(conf);
Path srcFiles = new Path(jobDirectory, "_har_src_files");
conf.set(SRC_LIST_LABEL, srcFiles.toString());
SequenceFile.Writer srcWriter = SequenceFile.createWriter(jobfs, conf,
srcFiles, LongWritable.class, HarEntry.class,
SequenceFile.CompressionType.NONE);
// get the list of files
// create single list of files and dirs
try {
// write the top level dirs in first
writeTopLevelDirs(srcWriter, srcPaths, parentPath);
srcWriter.sync();
// these are the input paths passed
// from the command line
// we do a recursive ls on these paths
// and then write them to the input file
// one at a time
for (Path src: srcPaths) {
ArrayList<FileStatusDir> allFiles = new ArrayList<FileStatusDir>();
FileStatus fstatus = fs.getFileStatus(src);
FileStatusDir fdir = new FileStatusDir(fstatus, null);
recursivels(fs, fdir, allFiles);
for (FileStatusDir statDir: allFiles) {
FileStatus stat = statDir.getFileStatus();
long len = stat.isDirectory()? 0:stat.getLen();
final Path path = relPathToRoot(stat.getPath(), parentPath);
final String[] children;
if (stat.isDirectory()) {
//get the children
FileStatus[] list = statDir.getChildren();
children = new String[list.length];
for (int i = 0; i < list.length; i++) {
children[i] = list[i].getPath().getName();
}
}
else {
children = null;
}
append(srcWriter, len, path.toString(), children);
srcWriter.sync();
numFiles++;
totalSize += len;
}
}
} finally {
srcWriter.close();
}
conf.setInt(SRC_COUNT_LABEL, numFiles);
conf.setLong(TOTAL_SIZE_LABEL, totalSize);
int numMaps = (int)(totalSize/partSize);
// run at least one map.
conf.setNumMapTasks(numMaps == 0? 1:numMaps);
conf.setNumReduceTasks(1);
conf.setInputFormat(HArchiveInputFormat.class);
conf.setOutputFormat(NullOutputFormat.class);
conf.setMapperClass(HArchivesMapper.class);
conf.setReducerClass(HArchivesReducer.class);
conf.setMapOutputKeyClass(IntWritable.class);
conf.setMapOutputValueClass(Text.class);
FileInputFormat.addInputPath(conf, jobDirectory);
//make sure no speculative execution is done
conf.setSpeculativeExecution(false);
JobClient.runJob(conf);
//delete the tmp job directory
try {
jobfs.delete(jobDirectory, true);
} catch(IOException ie) {
LOG.info("Unable to clean tmp directory " + jobDirectory);
}
}
static class HArchivesMapper
implements Mapper<LongWritable, HarEntry, IntWritable, Text> {
private JobConf conf = null;
int partId = -1 ;
Path tmpOutputDir = null;
Path tmpOutput = null;
String partname = null;
Path rootPath = null;
FSDataOutputStream partStream = null;
FileSystem destFs = null;
byte[] buffer;
int buf_size = 128 * 1024;
private int replication = 3;
long blockSize = 512 * 1024 * 1024l;
// configure the mapper and create
// the part file.
// use map reduce framework to write into
// tmp files.
public void configure(JobConf conf) {
this.conf = conf;
replication = conf.getInt(HAR_REPLICATION_LABEL, 3);
// this is tightly tied to map reduce
// since it does not expose an api
// to get the partition
partId = conf.getInt(MRJobConfig.TASK_PARTITION, -1);
// create a file name using the partition
// we need to write to this directory
tmpOutputDir = FileOutputFormat.getWorkOutputPath(conf);
blockSize = conf.getLong(HAR_BLOCKSIZE_LABEL, blockSize);
// get the output path and write to the tmp
// directory
partname = "part-" + partId;
tmpOutput = new Path(tmpOutputDir, partname);
rootPath = (conf.get(SRC_PARENT_LABEL, null) == null) ? null :
new Path(conf.get(SRC_PARENT_LABEL));
if (rootPath == null) {
throw new RuntimeException("Unable to read parent " +
"path for har from config");
}
try {
destFs = tmpOutput.getFileSystem(conf);
// delete any stale copy left by a previous attempt
if (destFs.exists(tmpOutput)) {
destFs.delete(tmpOutput, false);
}
partStream = destFs.create(tmpOutput, false, conf.getInt("io.file.buffer.size", 4096),
destFs.getDefaultReplication(tmpOutput), blockSize);
} catch(IOException ie) {
throw new RuntimeException("Unable to open output file " + tmpOutput, ie);
}
buffer = new byte[buf_size];
}
// copy raw data.
public void copyData(Path input, FSDataInputStream fsin,
FSDataOutputStream fout, Reporter reporter) throws IOException {
try {
for (int cbread=0; (cbread = fsin.read(buffer))>= 0;) {
fout.write(buffer, 0,cbread);
reporter.progress();
}
} finally {
fsin.close();
}
}
/**
 * resolve a relative path against the parent,
 * stripping the leading / from the path
 * @param p the path
 * @return the path resolved under the parent, without the leading /
 */
private Path realPath(Path p, Path parent) {
Path rootPath = new Path(Path.SEPARATOR);
if (rootPath.compareTo(p) == 0) {
return parent;
}
return new Path(parent, new Path(p.toString().substring(1)));
}
private static String encodeName(String s)
throws UnsupportedEncodingException {
return URLEncoder.encode(s,"UTF-8");
}
private static String encodeProperties( FileStatus fStatus )
throws UnsupportedEncodingException {
String propStr = encodeName(
fStatus.getModificationTime() + " "
+ fStatus.getPermission().toShort() + " "
+ encodeName(fStatus.getOwner()) + " "
+ encodeName(fStatus.getGroup()));
return propStr;
}
// read files from the split input
// and write it onto the part files.
// also output hash(name) and string
// for reducer to create index
// and masterindex files.
public void map(LongWritable key, HarEntry value,
OutputCollector<IntWritable, Text> out,
Reporter reporter) throws IOException {
Path relPath = new Path(value.path);
int hash = HarFileSystem.getHarHash(relPath);
String towrite = null;
Path srcPath = realPath(relPath, rootPath);
long startPos = partStream.getPos();
FileSystem srcFs = srcPath.getFileSystem(conf);
FileStatus srcStatus = srcFs.getFileStatus(srcPath);
String propStr = encodeProperties(srcStatus);
if (value.isDir()) {
towrite = encodeName(relPath.toString())
+ " dir " + propStr + " 0 0 ";
StringBuffer sbuff = new StringBuffer();
sbuff.append(towrite);
for (String child: value.children) {
sbuff.append(encodeName(child) + " ");
}
towrite = sbuff.toString();
//reading directories is also progress
reporter.progress();
}
else {
FSDataInputStream input = srcFs.open(srcStatus.getPath());
reporter.setStatus("Copying file " + srcStatus.getPath() +
" to archive.");
copyData(srcStatus.getPath(), input, partStream, reporter);
towrite = encodeName(relPath.toString())
+ " file " + partname + " " + startPos
+ " " + srcStatus.getLen() + " " + propStr + " ";
}
out.collect(new IntWritable(hash), new Text(towrite));
}
public void close() throws IOException {
// close the part files.
partStream.close();
destFs.setReplication(tmpOutput, (short) replication);
}
}
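/* Shape of the mapper's output values (one index line per entry; the field
 * values below are made up, and props is the URL-encoded
 * "mtime permission owner group" string from encodeProperties):
 *   %2Fsource dir 1234567890+493+alice+staff 0 0 dir1 dir2
 *   %2Fsource%2Fdir1%2Ff1 file part-0 0 1048576 1234567890+420+alice+staff
 * The key is HarFileSystem.getHarHash(relPath), which the reducer uses to
 * build the master index buckets.
 */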
/** the reduce for creating the index and the master index
*
*/
static class HArchivesReducer implements Reducer<IntWritable,
Text, Text, Text> {
private JobConf conf = null;
private long startIndex = 0;
private long endIndex = 0;
private long startPos = 0;
private Path masterIndex = null;
private Path index = null;
private FileSystem fs = null;
private FSDataOutputStream outStream = null;
private FSDataOutputStream indexStream = null;
private int numIndexes = 1000;
private Path tmpOutputDir = null;
private int written = 0;
private int replication = 3;
private int keyVal = 0;
// configure
public void configure(JobConf conf) {
this.conf = conf;
tmpOutputDir = FileOutputFormat.getWorkOutputPath(this.conf);
masterIndex = new Path(tmpOutputDir, "_masterindex");
index = new Path(tmpOutputDir, "_index");
replication = conf.getInt(HAR_REPLICATION_LABEL, 3);
try {
fs = masterIndex.getFileSystem(conf);
if (fs.exists(masterIndex)) {
fs.delete(masterIndex, false);
}
if (fs.exists(index)) {
fs.delete(index, false);
}
indexStream = fs.create(index);
outStream = fs.create(masterIndex);
String version = VERSION + " \n";
outStream.write(version.getBytes(Charsets.UTF_8));
} catch(IOException e) {
throw new RuntimeException(e);
}
}
// create the index and master index. The input to
// the reduce is already sorted by the hash of the
// files, so we just need to write it to the index.
// We update the masterindex after every numIndexes
// entries written.
public void reduce(IntWritable key, Iterator<Text> values,
OutputCollector<Text, Text> out,
Reporter reporter) throws IOException {
keyVal = key.get();
while(values.hasNext()) {
Text value = values.next();
String towrite = value.toString() + "\n";
indexStream.write(towrite.getBytes(Charsets.UTF_8));
written++;
if (written > numIndexes -1) {
// every 1000 indexes we report status
reporter.setStatus("Creating index for archives");
reporter.progress();
endIndex = keyVal;
String masterWrite = startIndex + " " + endIndex + " " + startPos
+ " " + indexStream.getPos() + " \n" ;
outStream.write(masterWrite.getBytes(Charsets.UTF_8));
startPos = indexStream.getPos();
startIndex = endIndex;
written = 0;
}
}
}
public void close() throws IOException {
//write the last part of the master index.
if (written > 0) {
String masterWrite = startIndex + " " + keyVal + " " + startPos +
" " + indexStream.getPos() + " \n";
outStream.write(masterWrite.getBytes(Charsets.UTF_8));
}
// close the streams
outStream.close();
indexStream.close();
// try increasing the replication
fs.setReplication(index, (short) replication);
fs.setReplication(masterIndex, (short) replication);
}
}
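/* Resulting layout (a sketch with made-up offsets): _index holds the entry
 * lines sorted by hash, and _masterindex holds the version line followed by
 * one "startHash endHash startOffset endOffset" line per bucket of up to
 * numIndexes (1000) index entries, e.g.
 *   3
 *   0 21312495 0 34532
 *   21312496 41979444 34532 67899
 */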
private void printUsage(Options opts, boolean printDetailed) {
HelpFormatter helpFormatter = new HelpFormatter();
if (printDetailed) {
helpFormatter.printHelp(usage.length() + 10, usage, null, opts, null,
false);
} else {
System.out.println(usage);
}
}
/** the main driver for creating the archives.
 * it takes at least three command line parameters: the parent path,
 * the src and the dest. It does a recursive ls on the source paths.
 * The mapper creates the archives and the reducer creates
 * the archive index.
 */
public int run(String[] args) throws Exception {
try {
// Parse CLI options
Options options = new Options();
options.addOption(ARCHIVE_NAME, true,
"Name of the Archive. This is a mandatory option");
options.addOption(PARENT_PATH, true,
"Parent path of sources. This is a mandatory option");
options.addOption(REPLICATION, true, "Replication factor for archive files");
options.addOption(HELP, false, "Show the usage");
Parser parser = new GnuParser();
CommandLine commandLine = parser.parse(options, args, true);
if (commandLine.hasOption(HELP)) {
printUsage(options, true);
return 0;
}
if (!commandLine.hasOption(ARCHIVE_NAME)) {
printUsage(options, false);
throw new IOException("Archive Name not specified.");
}
String archiveName = commandLine.getOptionValue(ARCHIVE_NAME);
if (!checkValidName(archiveName)) {
printUsage(options, false);
throw new IOException("Invalid name for archives. " + archiveName);
}
//check to see if relative parent has been provided or not
//this is a required parameter.
if (!commandLine.hasOption(PARENT_PATH)) {
printUsage(options, false);
throw new IOException("Parent path not specified.");
}
Path parentPath = new Path(commandLine.getOptionValue(PARENT_PATH));
if (!parentPath.isAbsolute()) {
parentPath = parentPath.getFileSystem(getConf()).makeQualified(
parentPath);
}
if (commandLine.hasOption(REPLICATION)) {
repl = Short.parseShort(commandLine.getOptionValue(REPLICATION));
}
// Remaining args
args = commandLine.getArgs();
List<Path> srcPaths = new ArrayList<Path>();
Path destPath = null;
//read the rest of the paths
for (int i = 0; i < args.length; i++) {
if (i == (args.length - 1)) {
destPath = new Path(args[i]);
if (!destPath.isAbsolute()) {
destPath = destPath.getFileSystem(getConf()).makeQualified(destPath);
}
}
else {
Path argPath = new Path(args[i]);
if (argPath.isAbsolute()) {
printUsage(options, false);
throw new IOException("Source path " + argPath +
" is not relative to "+ parentPath);
}
srcPaths.add(new Path(parentPath, argPath));
}
}
if (destPath == null) {
printUsage(options, false);
throw new IOException("Destination path not specified.");
}
if (srcPaths.size() == 0) {
// assuming if the user does not specify path for sources
// the whole parent directory needs to be archived.
srcPaths.add(parentPath);
}
// do a glob on the srcPaths and then pass it on
List<Path> globPaths = new ArrayList<Path>();
for (Path p: srcPaths) {
FileSystem fs = p.getFileSystem(getConf());
FileStatus[] statuses = fs.globStatus(p);
if (statuses != null) {
for (FileStatus status: statuses) {
globPaths.add(fs.makeQualified(status.getPath()));
}
}
}
if (globPaths.isEmpty()) {
throw new IOException("The resolved paths set is empty."
+ " Please check whether the srcPaths exist, where srcPaths = "
+ srcPaths);
}
archive(parentPath, globPaths, archiveName, destPath);
} catch(IOException ie) {
System.err.println(ie.getLocalizedMessage());
return -1;
}
return 0;
}
static final String TEST_HADOOP_ARCHIVES_JAR_PATH = "test.hadoop.archives.jar";
/** the main function **/
public static void main(String[] args) {
JobConf job = new JobConf(HadoopArchives.class);
HadoopArchives harchives = new HadoopArchives(job);
int ret = 0;
try{
ret = ToolRunner.run(harchives, args);
} catch(Exception e) {
LOG.debug("Exception in archives ", e);
System.err.println(e.getClass().getSimpleName() + " in archives");
final String s = e.getLocalizedMessage();
if (s != null) {
System.err.println(s);
} else {
e.printStackTrace(System.err);
}
System.exit(1);
}
System.exit(ret);
}
}
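/* A minimal usage sketch (hypothetical paths, following the usage string
 * defined above):
 *
 *   hadoop archive -archiveName foo.har -p /user/alice dir1 dir2 /user/alice/archives
 *
 * or programmatically, mirroring main():
 *
 *   JobConf job = new JobConf(HadoopArchives.class);
 *   int ret = ToolRunner.run(new HadoopArchives(job), new String[] {
 *       "-archiveName", "foo.har", "-p", "/user/alice",
 *       "dir1", "dir2", "/user/alice/archives"});
 */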
file_length: 33798 | avg_line_length: 34.540484 | max_line_length: 95 | extension_type: java
repo: hadoop | file: hadoop-master/hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/SampleDataJoinReducer.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.contrib.utils.join;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.contrib.utils.join.DataJoinReducerBase;
import org.apache.hadoop.contrib.utils.join.TaggedMapOutput;
/**
* This is a subclass of DataJoinReducerBase that is used to
* demonstrate the functionality of INNER JOIN between 2 data
* sources (TAB separated text files) based on the first column.
*/
public class SampleDataJoinReducer extends DataJoinReducerBase {
/**
*
* @param tags
* a list of source tags
* @param values
* a value per source
* @return combined value derived from values of the sources
*/
protected TaggedMapOutput combine(Object[] tags, Object[] values) {
// eliminate rows which did not match in one of the two tables (for INNER JOIN)
if (tags.length < 2)
return null;
String joinedStr = "";
for (int i=0; i<tags.length; i++) {
if (i > 0)
joinedStr += "\t";
// strip first column as it is the key on which we joined
String line = ((Text) (((TaggedMapOutput) values[i]).getData())).toString();
String[] tokens = line.split("\\t", 2);
joinedStr += tokens[1];
}
TaggedMapOutput retv = new SampleTaggedMapOutput(new Text(joinedStr));
retv.setTag((Text) tags[0]);
return retv;
}
}
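/* Worked example of combine() above (hypothetical rows): for join key k1,
 * source A contributes "k1\ta1" and source B contributes "k1\tb1", so
 * tags = {A, B}, the key column is stripped from each row, and the reducer
 * emits "a1\tb1" tagged with A. A key seen in only one source arrives with
 * tags.length < 2 and is dropped (returns null), giving INNER JOIN semantics.
 */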
file_length: 2139 | avg_line_length: 35.271186 | max_line_length: 82 | extension_type: java
repo: hadoop | file: hadoop-master/hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/SampleDataJoinMapper.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.contrib.utils.join;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.contrib.utils.join.DataJoinMapperBase;
import org.apache.hadoop.contrib.utils.join.TaggedMapOutput;
import org.apache.hadoop.contrib.utils.join.SampleTaggedMapOutput;
/**
* This is a subclass of DataJoinMapperBase that is used to
* demonstrate the functionality of INNER JOIN between 2 data
* sources (TAB separated text files) based on the first column.
*/
public class SampleDataJoinMapper extends DataJoinMapperBase {
protected Text generateInputTag(String inputFile) {
// tag the row with input file name (data source)
return new Text(inputFile);
}
protected Text generateGroupKey(TaggedMapOutput aRecord) {
// first column in the input tab separated files becomes the key (to perform the JOIN)
String line = ((Text) aRecord.getData()).toString();
String groupKey = "";
String[] tokens = line.split("\\t", 2);
groupKey = tokens[0];
return new Text(groupKey);
}
protected TaggedMapOutput generateTaggedMapOutput(Object value) {
TaggedMapOutput retv = new SampleTaggedMapOutput((Text) value);
retv.setTag(new Text(this.inputTag));
return retv;
}
}
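/* Example (hypothetical line): for an input row "k1\tcolA" read from a file
 * named srcA.txt, generateInputTag tags the record with "srcA.txt" and
 * generateGroupKey returns new Text("k1"), so the reducer groups records by
 * join key and then sub-groups them by source tag.
 */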
file_length: 2030 | avg_line_length: 35.927273 | max_line_length: 90 | extension_type: java
repo: hadoop | file: hadoop-master/hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/SampleTaggedMapOutput.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.contrib.utils.join;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.contrib.utils.join.TaggedMapOutput;
/**
* This is a subclass of TaggedMapOutput that is used to
* demonstrate the functionality of INNER JOIN between 2 data
* sources (TAB separated text files) based on the first column.
*/
public class SampleTaggedMapOutput extends TaggedMapOutput {
private Text data;
public SampleTaggedMapOutput() {
this.data = new Text("");
}
public SampleTaggedMapOutput(Text data) {
this.data = data;
}
public Writable getData() {
return data;
}
public void write(DataOutput out) throws IOException {
this.tag.write(out);
this.data.write(out);
}
public void readFields(DataInput in) throws IOException {
this.tag.readFields(in);
this.data.readFields(in);
}
}
file_length: 1779 | avg_line_length: 28.180328 | max_line_length: 75 | extension_type: java
repo: hadoop | file: hadoop-master/hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/TestDataJoin.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.contrib.utils.join;
import java.io.IOException;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import junit.extensions.TestSetup;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;
public class TestDataJoin extends TestCase {
private static MiniDFSCluster cluster = null;
public static Test suite() {
TestSetup setup = new TestSetup(new TestSuite(TestDataJoin.class)) {
protected void setUp() throws Exception {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
}
protected void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
};
return setup;
}
public void testDataJoin() throws Exception {
final int srcs = 4;
JobConf job = new JobConf();
job.setBoolean("mapreduce.fileoutputcommitter.marksuccessfuljobs", false);
Path base = cluster.getFileSystem().makeQualified(new Path("/inner"));
Path[] src = writeSimpleSrc(base, job, srcs);
job.setInputFormat(SequenceFileInputFormat.class);
Path outdir = new Path(base, "out");
FileOutputFormat.setOutputPath(job, outdir);
job.setMapperClass(SampleDataJoinMapper.class);
job.setReducerClass(SampleDataJoinReducer.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(SampleTaggedMapOutput.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setOutputFormat(TextOutputFormat.class);
job.setNumMapTasks(1);
job.setNumReduceTasks(1);
FileInputFormat.setInputPaths(job, src);
try {
JobClient.runJob(job);
confirmOutput(outdir, job, srcs);
} finally {
base.getFileSystem(job).delete(base, true);
}
}
private static void confirmOutput(Path out, JobConf job, int srcs)
throws IOException {
FileSystem fs = out.getFileSystem(job);
FileStatus[] outlist = fs.listStatus(out);
assertEquals(1, outlist.length);
assertTrue(0 < outlist[0].getLen());
FSDataInputStream in = fs.open(outlist[0].getPath());
LineRecordReader rr = new LineRecordReader(in, 0, Integer.MAX_VALUE, job);
LongWritable k = new LongWritable();
Text v = new Text();
int count = 0;
while (rr.next(k, v)) {
String[] vals = v.toString().split("\t");
assertEquals(srcs + 1, vals.length);
int[] ivals = new int[vals.length];
for (int i = 0; i < vals.length; ++i)
ivals[i] = Integer.parseInt(vals[i]);
assertEquals(0, ivals[0] % (srcs * srcs));
for (int i = 1; i < vals.length; ++i) {
assertEquals((ivals[i] - (i - 1)) * srcs, 10 * ivals[0]);
}
++count;
}
assertEquals(4, count);
}
private static SequenceFile.Writer[] createWriters(Path testdir,
JobConf conf, int srcs, Path[] src) throws IOException {
for (int i = 0; i < srcs; ++i) {
src[i] = new Path(testdir, Integer.toString(i + 10, 36));
}
SequenceFile.Writer out[] = new SequenceFile.Writer[srcs];
for (int i = 0; i < srcs; ++i) {
out[i] = new SequenceFile.Writer(testdir.getFileSystem(conf), conf,
src[i], Text.class, Text.class);
}
return out;
}
private static Path[] writeSimpleSrc(Path testdir, JobConf conf,
int srcs) throws IOException {
SequenceFile.Writer out[] = null;
Path[] src = new Path[srcs];
try {
out = createWriters(testdir, conf, srcs, src);
final int capacity = srcs * 2 + 1;
Text key = new Text();
key.set("ignored");
Text val = new Text();
for (int k = 0; k < capacity; ++k) {
for (int i = 0; i < srcs; ++i) {
val.set(Integer.toString(k % srcs == 0 ? k * srcs : k * srcs + i) +
"\t" + Integer.toString(10 * k + i));
out[i].append(key, val);
if (i == k) {
// add duplicate key
out[i].append(key, val);
}
}
}
} finally {
if (out != null) {
for (int i = 0; i < srcs; ++i) {
if (out[i] != null)
out[i].close();
}
}
}
return src;
}
}
file_length: 5377 | avg_line_length: 33.474359 | max_line_length: 78 | extension_type: java
repo: hadoop | file: hadoop-master/hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/JobBase.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.contrib.utils.join;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.Map.Entry;
import java.util.Iterator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.Reducer;
/**
 * A common base implementing some statistics-collecting mechanisms that are
 * commonly used in a typical map/reduce job.
 *
 */
public abstract class JobBase implements Mapper, Reducer {
public static final Log LOG = LogFactory.getLog("datajoin.job");
private SortedMap<Object, Long> longCounters = null;
private SortedMap<Object, Double> doubleCounters = null;
/**
* Set the given counter to the given value
*
* @param name
* the counter name
* @param value
* the value for the counter
*/
protected void setLongValue(Object name, long value) {
this.longCounters.put(name, Long.valueOf(value));
}
/**
* Set the given counter to the given value
*
* @param name
* the counter name
* @param value
* the value for the counter
*/
protected void setDoubleValue(Object name, double value) {
this.doubleCounters.put(name, Double.valueOf(value));
}
/**
*
* @param name
* the counter name
* @return return the value of the given counter.
*/
protected Long getLongValue(Object name) {
return this.longCounters.get(name);
}
/**
*
* @param name
* the counter name
* @return return the value of the given counter.
*/
protected Double getDoubleValue(Object name) {
return this.doubleCounters.get(name);
}
/**
* Increment the given counter by the given incremental value. If the counter
* does not exist, one is created with value 0 and then incremented.
*
* @param name
* the counter name
* @param inc
* the incremental value
* @return the updated value.
*/
protected Long addLongValue(Object name, long inc) {
Long val = this.longCounters.get(name);
Long retv = null;
if (val == null) {
retv = Long.valueOf(inc);
} else {
retv = Long.valueOf(val.longValue() + inc);
}
this.longCounters.put(name, retv);
return retv;
}
/**
* Increment the given counter by the given incremental value. If the counter
* does not exist, one is created with value 0 and then incremented.
*
* @param name
* the counter name
* @param inc
* the incremental value
* @return the updated value.
*/
protected Double addDoubleValue(Object name, double inc) {
Double val = this.doubleCounters.get(name);
Double retv = null;
if (val == null) {
retv = Double.valueOf(inc);
} else {
retv = Double.valueOf(val.doubleValue() + inc);
}
this.doubleCounters.put(name, retv);
return retv;
}
/**
* log the counters
*
*/
protected void report() {
LOG.info(getReport());
}
/**
 * format the counters as a report string
 *
 */
protected String getReport() {
StringBuffer sb = new StringBuffer();
Iterator iter = this.longCounters.entrySet().iterator();
while (iter.hasNext()) {
Entry e = (Entry) iter.next();
sb.append(e.getKey().toString()).append("\t").append(e.getValue())
.append("\n");
}
iter = this.doubleCounters.entrySet().iterator();
while (iter.hasNext()) {
Entry e = (Entry) iter.next();
sb.append(e.getKey().toString()).append("\t").append(e.getValue())
.append("\n");
}
return sb.toString();
}
/**
* Initializes a new instance from a {@link JobConf}.
*
* @param job
* the configuration
*/
public void configure(JobConf job) {
this.longCounters = new TreeMap<Object, Long>();
this.doubleCounters = new TreeMap<Object, Double>();
}
}
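/* A minimal subclass sketch showing the counter helpers (CountingMapper and
 * its map/reduce logic are hypothetical; configure() above initializes the
 * counter maps):
 *
 *   public class CountingMapper extends JobBase {
 *     public void map(Object k, Object v, OutputCollector out, Reporter r)
 *         throws IOException {
 *       addLongValue("records", 1);   // creates the counter on first use
 *       out.collect(k, v);
 *     }
 *     public void reduce(Object k, Iterator vals, OutputCollector out,
 *         Reporter r) throws IOException { }
 *     public void close() throws IOException {
 *       report();                     // logs "records\t<count>"
 *     }
 *   }
 */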
file_length: 4704 | avg_line_length: 26.04023 | max_line_length: 78 | extension_type: java
repo: hadoop | file: hadoop-master/hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/TaggedMapOutput.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.contrib.utils.join;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.JobConf;
/**
* This abstract class serves as the base class for the values that
* flow from the mappers to the reducers in a data join job.
* Typically, in such a job, the mappers will compute the source
* tag of an input record based on its attributes or based on the
* file name of the input file. This tag will be used by the reducers
* to re-group the values of a given key according to their source tags.
*
*/
public abstract class TaggedMapOutput implements Writable {
protected Text tag;
public TaggedMapOutput() {
this.tag = new Text("");
}
public Text getTag() {
return tag;
}
public void setTag(Text tag) {
this.tag = tag;
}
public abstract Writable getData();
public TaggedMapOutput clone(JobConf job) {
return (TaggedMapOutput) WritableUtils.clone(this, job);
}
}
file_length: 1835 | avg_line_length: 31.210526 | max_line_length: 75 | extension_type: java
repo: hadoop | file: hadoop-master/hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/ArrayListBackedIterator.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.contrib.utils.join;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
/**
 * This class provides an implementation of ResetableIterator,
 * backed by an ArrayList.
 *
 */
public class ArrayListBackedIterator implements ResetableIterator {
private Iterator iter;
private ArrayList<Object> data;
public ArrayListBackedIterator() {
this(new ArrayList<Object>());
}
public ArrayListBackedIterator(ArrayList<Object> data) {
this.data = data;
this.iter = this.data.iterator();
}
public void add(Object item) {
this.data.add(item);
}
public boolean hasNext() {
return this.iter.hasNext();
}
public Object next() {
return this.iter.next();
}
public void remove() {
}
public void reset() {
this.iter = this.data.iterator();
}
public void close() throws IOException {
this.iter = null;
this.data = null;
}
}
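/* Usage sketch of the reset semantics (values are hypothetical). Note that
 * add() invalidates the iterator created in the constructor, so callers (as
 * DataJoinReducerBase does) call reset() after loading, before the first pass:
 *
 *   ArrayListBackedIterator it = new ArrayListBackedIterator();
 *   it.add("a"); it.add("b");
 *   it.reset();                            // bind to the loaded data
 *   while (it.hasNext()) { it.next(); }    // a, b
 *   it.reset();                            // rewind and replay
 *   while (it.hasNext()) { it.next(); }    // a, b again
 */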
file_length: 1778 | avg_line_length: 24.056338 | max_line_length: 75 | extension_type: java
repo: hadoop | file: hadoop-master/hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/DataJoinJob.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.contrib.utils.join;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.mapred.JobID;
/**
* This class implements the main function for creating a map/reduce
* job to join data from different sources. To create such a job, the
* user must implement a mapper class that extends the DataJoinMapperBase class,
* and a reducer class that extends DataJoinReducerBase.
*
*/
public class DataJoinJob {
public static Class getClassByName(String className) {
Class retv = null;
try {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
retv = Class.forName(className, true, classLoader);
} catch (Exception e) {
throw new RuntimeException(e);
}
return retv;
}
public static JobConf createDataJoinJob(String args[]) throws IOException {
String inputDir = args[0];
String outputDir = args[1];
Class inputFormat = SequenceFileInputFormat.class;
if (args[2].compareToIgnoreCase("text") != 0) {
System.out.println("Using SequenceFileInputFormat: " + args[2]);
} else {
System.out.println("Using TextInputFormat: " + args[2]);
inputFormat = TextInputFormat.class;
}
int numOfReducers = Integer.parseInt(args[3]);
Class mapper = getClassByName(args[4]);
Class reducer = getClassByName(args[5]);
Class mapoutputValueClass = getClassByName(args[6]);
Class outputFormat = TextOutputFormat.class;
Class outputValueClass = Text.class;
if (args[7].compareToIgnoreCase("text") != 0) {
System.out.println("Using SequenceFileOutputFormat: " + args[7]);
outputFormat = SequenceFileOutputFormat.class;
outputValueClass = getClassByName(args[7]);
} else {
System.out.println("Using TextOutputFormat: " + args[7]);
}
long maxNumOfValuesPerGroup = 100;
String jobName = "";
if (args.length > 8) {
maxNumOfValuesPerGroup = Long.parseLong(args[8]);
}
if (args.length > 9) {
jobName = args[9];
}
Configuration defaults = new Configuration();
JobConf job = new JobConf(defaults, DataJoinJob.class);
job.setJobName("DataJoinJob: " + jobName);
FileSystem fs = FileSystem.get(defaults);
fs.delete(new Path(outputDir), true);
FileInputFormat.setInputPaths(job, inputDir);
job.setInputFormat(inputFormat);
job.setMapperClass(mapper);
FileOutputFormat.setOutputPath(job, new Path(outputDir));
job.setOutputFormat(outputFormat);
SequenceFileOutputFormat.setOutputCompressionType(job,
SequenceFile.CompressionType.BLOCK);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(mapoutputValueClass);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(outputValueClass);
job.setReducerClass(reducer);
job.setNumMapTasks(1);
job.setNumReduceTasks(numOfReducers);
job.setLong("datajoin.maxNumOfValuesPerGroup", maxNumOfValuesPerGroup);
return job;
}
/**
* Submit/run a map/reduce job.
*
* @param job
* @return true for success
* @throws IOException
*/
public static boolean runJob(JobConf job) throws IOException {
JobClient jc = new JobClient(job);
boolean success = true;
RunningJob running = null;
try {
running = jc.submitJob(job);
JobID jobId = running.getID();
System.out.println("Job " + jobId + " is submitted");
while (!running.isComplete()) {
System.out.println("Job " + jobId + " is still running.");
try {
Thread.sleep(60000);
} catch (InterruptedException e) {
}
running = jc.getJob(jobId);
}
success = running.isSuccessful();
} finally {
if (!success && (running != null)) {
running.killJob();
}
jc.close();
}
return success;
}
/**
* @param args
*/
public static void main(String[] args) {
boolean success;
if (args.length < 8 || args.length > 10) {
System.out.println("usage: DataJoinJob " + "inputdirs outputdir map_input_file_format "
+ "numofParts " + "mapper_class " + "reducer_class "
+ "map_output_value_class "
+ "output_value_class [maxNumOfValuesPerGroup [descriptionOfJob]]]");
System.exit(-1);
}
try {
JobConf job = DataJoinJob.createDataJoinJob(args);
success = DataJoinJob.runJob(job);
if (!success) {
System.out.println("Job failed");
}
} catch (IOException ioe) {
ioe.printStackTrace();
}
}
}
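/* A hypothetical invocation matching the usage string printed by main()
 * (jar name, paths, and class names are made up):
 *
 *   hadoop jar hadoop-datajoin.jar org.apache.hadoop.contrib.utils.join.DataJoinJob \
 *       /join/in /join/out text 2 \
 *       org.example.MyJoinMapper org.example.MyJoinReducer \
 *       org.example.MyTaggedOutput text 100 "my join job"
 */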
file_length: 6002 | avg_line_length: 33.302857 | max_line_length: 94 | extension_type: java
repo: hadoop | file: hadoop-master/hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/DataJoinReducerBase.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.contrib.utils.join;
import java.io.IOException;
import java.util.Iterator;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
/**
* This abstract class serves as the base class for the reducer class of a data
* join job. The reduce function will first group the values according to their
* input tags, and then compute the cross product over the groups. For each
* tuple in the cross product, it calls the following method, which is expected
* to be implemented in a subclass.
*
* protected abstract TaggedMapOutput combine(Object[] tags, Object[] values);
*
* The above method is expected to produce one output value from an array of
* records of different sources. The user code can also perform filtering here.
* It can return null if it decides that the records do not meet certain
* conditions.
*
*/
public abstract class DataJoinReducerBase extends JobBase {
protected Reporter reporter = null;
private long maxNumOfValuesPerGroup = 100;
protected long largestNumOfValues = 0;
protected long numOfValues = 0;
protected long collected = 0;
protected JobConf job;
public void close() throws IOException {
if (this.reporter != null) {
this.reporter.setStatus(super.getReport());
}
}
public void configure(JobConf job) {
super.configure(job);
this.job = job;
this.maxNumOfValuesPerGroup = job.getLong("datajoin.maxNumOfValuesPerGroup", 100);
}
/**
* The subclass can provide a different implementation on ResetableIterator.
* This is necessary if the number of values in a reduce call is very high.
*
* The default provided here uses ArrayListBackedIterator
*
* @return an Object of ResetableIterator.
*/
protected ResetableIterator createResetableIterator() {
return new ArrayListBackedIterator();
}
/**
* This is the function that re-groups values for a key into sub-groups based
* on a secondary key (input tag).
*
* @param arg1 the iterator over the values of the key
* @return a map from input tag to a ResetableIterator over that tag's values
*/
private SortedMap<Object, ResetableIterator> regroup(Object key,
Iterator arg1, Reporter reporter) throws IOException {
this.numOfValues = 0;
SortedMap<Object, ResetableIterator> retv = new TreeMap<Object, ResetableIterator>();
TaggedMapOutput aRecord = null;
while (arg1.hasNext()) {
this.numOfValues += 1;
if (this.numOfValues % 100 == 0) {
reporter.setStatus("key: " + key.toString() + " numOfValues: "
+ this.numOfValues);
}
if (this.numOfValues > this.maxNumOfValuesPerGroup) {
continue;
}
aRecord = ((TaggedMapOutput) arg1.next()).clone(job);
Text tag = aRecord.getTag();
ResetableIterator data = retv.get(tag);
if (data == null) {
data = createResetableIterator();
retv.put(tag, data);
}
data.add(aRecord);
}
if (this.numOfValues > this.largestNumOfValues) {
this.largestNumOfValues = numOfValues;
LOG.info("key: " + key.toString() + " this.largestNumOfValues: "
+ this.largestNumOfValues);
}
return retv;
}
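  // Editor's note (illustrative, not in the original source): for a key K
  // with tagged values [A:a1, A:a2, B:b1], regroup returns the map
  // {A -> [a1, a2], B -> [b1]}; joinAndCollect below then feeds the cross
  // product tuples (a1, b1) and (a2, b1) to combine().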
public void reduce(Object key, Iterator values,
OutputCollector output, Reporter reporter) throws IOException {
if (this.reporter == null) {
this.reporter = reporter;
}
SortedMap<Object, ResetableIterator> groups = regroup(key, values, reporter);
Object[] tags = groups.keySet().toArray();
ResetableIterator[] groupValues = new ResetableIterator[tags.length];
for (int i = 0; i < tags.length; i++) {
groupValues[i] = groups.get(tags[i]);
}
joinAndCollect(tags, groupValues, key, output, reporter);
addLongValue("groupCount", 1);
for (int i = 0; i < tags.length; i++) {
groupValues[i].close();
}
}
/**
 * The subclass can override this method to perform additional filtering
* and/or other processing logic before a value is collected.
*
* @param key
* @param aRecord
* @param output
* @param reporter
* @throws IOException
*/
protected void collect(Object key, TaggedMapOutput aRecord,
OutputCollector output, Reporter reporter) throws IOException {
this.collected += 1;
addLongValue("collectedCount", 1);
if (aRecord != null) {
output.collect(key, aRecord.getData());
reporter.setStatus("key: " + key.toString() + " collected: " + collected);
addLongValue("actuallyCollectedCount", 1);
}
}
/**
 * Join the value lists (one per input tag) and collect the results.
*
* @param tags
* a list of input tags
* @param values
* a list of value lists, each corresponding to one input source
* @param key
* @param output
* @throws IOException
*/
private void joinAndCollect(Object[] tags, ResetableIterator[] values,
Object key, OutputCollector output, Reporter reporter)
throws IOException {
if (values.length < 1) {
return;
}
Object[] partialList = new Object[values.length];
joinAndCollect(tags, values, 0, partialList, key, output, reporter);
}
/**
* Perform the actual join recursively.
*
* @param tags
* a list of input tags
* @param values
* a list of value lists, each corresponding to one input source
* @param pos
* indicating the next value list to be joined
* @param partialList
* a list of values, each from one value list considered so far.
* @param key
* @param output
* @throws IOException
*/
private void joinAndCollect(Object[] tags, ResetableIterator[] values,
int pos, Object[] partialList, Object key,
OutputCollector output, Reporter reporter) throws IOException {
if (values.length == pos) {
// get a value from each source. Combine them
TaggedMapOutput combined = combine(tags, partialList);
collect(key, combined, output, reporter);
return;
}
ResetableIterator nextValues = values[pos];
nextValues.reset();
while (nextValues.hasNext()) {
Object v = nextValues.next();
partialList[pos] = v;
joinAndCollect(tags, values, pos + 1, partialList, key, output, reporter);
}
}
public static Text SOURCE_TAGS_FIELD = new Text("SOURCE_TAGS");
public static Text NUM_OF_VALUES_FIELD = new Text("NUM_OF_VALUES");
/**
*
* @param tags
* a list of source tags
* @param values
* a value per source
* @return combined value derived from values of the sources
*/
protected abstract TaggedMapOutput combine(Object[] tags, Object[] values);
public void map(Object arg0, Object arg1, OutputCollector arg2,
Reporter arg3) throws IOException {
    // no-op: this class is only used on the reduce side
}
}
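// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not part of the original source: a minimal
// inner-join reducer built on the plugin point above. It assumes the
// conventions of this contrib package -TaggedMapOutput implements Writable,
// keeps its tag in a protected field, and offers getData()/setTag(). The
// names SampleTaggedWritable and SampleInnerJoinReducer are hypothetical.
// ---------------------------------------------------------------------------
class SampleTaggedWritable extends TaggedMapOutput {
  private Text data;
  public SampleTaggedWritable() {
    this.data = new Text();
  }
  public SampleTaggedWritable(Text data) {
    this.data = data;
  }
  @Override
  public org.apache.hadoop.io.Writable getData() {
    return data;
  }
  @Override
  public void write(java.io.DataOutput out) throws IOException {
    this.tag.write(out);
    this.data.write(out);
  }
  @Override
  public void readFields(java.io.DataInput in) throws IOException {
    this.tag.readFields(in);
    this.data.readFields(in);
  }
}

class SampleInnerJoinReducer extends DataJoinReducerBase {
  @Override
  protected TaggedMapOutput combine(Object[] tags, Object[] values) {
    // inner join: only keep tuples that have a record from every source
    if (tags.length < 2) {
      return null; // returning null filters this tuple out
    }
    StringBuilder joined = new StringBuilder();
    for (int i = 0; i < values.length; i++) {
      if (i > 0) {
        joined.append(',');
      }
      joined.append(((TaggedMapOutput) values[i]).getData().toString());
    }
    SampleTaggedWritable retv =
        new SampleTaggedWritable(new Text(joined.toString()));
    retv.setTag((Text) tags[0]);
    return retv;
  }
}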
| 7,937 | 32.352941 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/ResetableIterator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.contrib.utils.join;
import java.io.IOException;
import java.util.Iterator;
/**
* This defines an iterator interface that will help the reducer class
* re-group its input by source tags. Once the values are re-grouped,
* the reducer will receive the cross product of values from different groups.
*/
public interface ResetableIterator extends Iterator {
public void reset();
public void add(Object item);
public void close() throws IOException;
}
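// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not part of the original source: roughly how
// an in-memory implementation such as the package's ArrayListBackedIterator
// can satisfy this contract. The name SampleListBackedIterator is
// hypothetical.
// ---------------------------------------------------------------------------
class SampleListBackedIterator implements ResetableIterator {
  private final java.util.List<Object> items =
      new java.util.ArrayList<Object>();
  private Iterator<Object> iter;

  public void add(Object item) {
    items.add(item); // buffer each value so the group can be replayed
  }
  public boolean hasNext() {
    if (iter == null) {
      reset();
    }
    return iter.hasNext();
  }
  public Object next() {
    if (iter == null) {
      reset();
    }
    return iter.next();
  }
  public void remove() {
    throw new UnsupportedOperationException("remove is not supported");
  }
  public void reset() {
    iter = items.iterator(); // rewind for the next cross-product pass
  }
  public void close() throws IOException {
    items.clear();
  }
}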
| 1,294 | 34.972222 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/DataJoinMapperBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.contrib.utils.join;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.MRJobConfig;
/**
* This abstract class serves as the base class for the mapper class of a data
* join job. This class expects its subclasses to implement methods for the
* following functionalities:
*
* 1. Compute the source tag of input values 2. Compute the map output value
* object 3. Compute the map output key object
*
* The source tag will be used by the reducer to determine from which source
* (which table in SQL terminology) a value comes. Computing the map output
* value object amounts to performing projecting/filtering work in a SQL
* statement (through the select/where clauses). Computing the map output key
* amounts to choosing the join key. This class provides the appropriate plugin
* points for the user defined subclasses to implement the appropriate logic.
*
*/
public abstract class DataJoinMapperBase extends JobBase {
protected String inputFile = null;
protected JobConf job = null;
protected Text inputTag = null;
protected Reporter reporter = null;
public void configure(JobConf job) {
super.configure(job);
this.job = job;
this.inputFile = job.get(MRJobConfig.MAP_INPUT_FILE);
this.inputTag = generateInputTag(this.inputFile);
}
/**
* Determine the source tag based on the input file name.
*
* @param inputFile
* @return the source tag computed from the given file name.
*/
protected abstract Text generateInputTag(String inputFile);
/**
* Generate a tagged map output value. The user code can also perform
 * projection/filtering. If it decides to discard the input record when
 * certain conditions are met, it can simply return null.
*
* @param value
* @return an object of TaggedMapOutput computed from the given value.
*/
protected abstract TaggedMapOutput generateTaggedMapOutput(Object value);
/**
* Generate a map output key. The user code can compute the key
* programmatically, not just selecting the values of some fields. In this
* sense, it is more general than the joining capabilities of SQL.
*
* @param aRecord
* @return the group key for the given record
*/
protected abstract Text generateGroupKey(TaggedMapOutput aRecord);
public void map(Object key, Object value,
OutputCollector output, Reporter reporter) throws IOException {
if (this.reporter == null) {
this.reporter = reporter;
}
addLongValue("totalCount", 1);
TaggedMapOutput aRecord = generateTaggedMapOutput(value);
if (aRecord == null) {
addLongValue("discardedCount", 1);
return;
}
Text groupKey = generateGroupKey(aRecord);
if (groupKey == null) {
addLongValue("nullGroupKeyCount", 1);
return;
}
output.collect(groupKey, aRecord);
addLongValue("collectedCount", 1);
}
public void close() throws IOException {
if (this.reporter != null) {
this.reporter.setStatus(super.getReport());
}
}
public void reduce(Object arg0, Iterator arg1,
OutputCollector arg2, Reporter arg3) throws IOException {
    // no-op: this class is only used on the map side
}
}
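// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not part of the original source: a minimal
// mapper subclass that tags each record with the name of the file it came
// from and joins on the first comma-separated field. SampleTaggedWritable is
// the hypothetical TaggedMapOutput subclass sketched alongside
// DataJoinReducerBase; the record layout is an assumption, not part of the
// contrib API.
// ---------------------------------------------------------------------------
class SampleJoinMapper extends DataJoinMapperBase {
  @Override
  protected Text generateInputTag(String inputFile) {
    // use the last path component as the source tag, e.g. "orders" vs "users"
    return new Text(inputFile.substring(inputFile.lastIndexOf('/') + 1));
  }
  @Override
  protected TaggedMapOutput generateTaggedMapOutput(Object value) {
    // wrap the raw line; returning null here would discard the record
    SampleTaggedWritable retv =
        new SampleTaggedWritable(new Text(value.toString()));
    retv.setTag(this.inputTag);
    return retv;
  }
  @Override
  protected Text generateGroupKey(TaggedMapOutput aRecord) {
    // join key: the first comma-separated field of the record
    String line = aRecord.getData().toString();
    return new Text(line.split(",", 2)[0]);
  }
}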
| 4,226 | 33.365854 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSeek.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.exceptions.SwiftConnectionClosedException;
import org.apache.hadoop.fs.swift.http.SwiftProtocolConstants;
import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
import org.apache.hadoop.io.IOUtils;
import org.junit.After;
import org.junit.Test;
import java.io.EOFException;
import java.io.IOException;
/**
* Seek tests verify that
* <ol>
* <li>When you seek on a 0 byte file to byte (0), it's not an error.</li>
 * <li>When you seek past the end of a file, it's an error that should
 * raise an EOFException.</li>
* <li>when you seek forwards, you get new data</li>
* <li>when you seek backwards, you get the previous data</li>
* <li>That this works for big multi-MB files as well as small ones.</li>
* </ol>
* These may seem "obvious", but the more the input streams try to be clever
* about offsets and buffering, the more likely it is that seek() will start
* to get confused.
*/
public class TestSeek extends SwiftFileSystemBaseTest {
protected static final Log LOG =
LogFactory.getLog(TestSeek.class);
public static final int SMALL_SEEK_FILE_LEN = 256;
private Path testPath;
private Path smallSeekFile;
private Path zeroByteFile;
private FSDataInputStream instream;
/**
* Setup creates dirs under test/hadoop
*
* @throws Exception
*/
@Override
public void setUp() throws Exception {
super.setUp();
//delete the test directory
testPath = path("/test");
smallSeekFile = new Path(testPath, "seekfile.txt");
zeroByteFile = new Path(testPath, "zero.txt");
byte[] block = SwiftTestUtils.dataset(SMALL_SEEK_FILE_LEN, 0, 255);
//this file now has a simple rule: offset => value
createFile(smallSeekFile, block);
createEmptyFile(zeroByteFile);
}
@After
public void cleanFile() {
IOUtils.closeStream(instream);
instream = null;
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testSeekZeroByteFile() throws Throwable {
instream = fs.open(zeroByteFile);
assertEquals(0, instream.getPos());
    //expect initial read to fail
int result = instream.read();
assertMinusOne("initial byte read", result);
byte[] buffer = new byte[1];
//expect that seek to 0 works
instream.seek(0);
    //reread, expect the same -1 result
result = instream.read();
assertMinusOne("post-seek byte read", result);
result = instream.read(buffer, 0, 1);
assertMinusOne("post-seek buffer read", result);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testBlockReadZeroByteFile() throws Throwable {
instream = fs.open(zeroByteFile);
assertEquals(0, instream.getPos());
//expect that seek to 0 works
byte[] buffer = new byte[1];
int result = instream.read(buffer, 0, 1);
assertMinusOne("block read zero byte file", result);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testSeekReadClosedFile() throws Throwable {
instream = fs.open(smallSeekFile);
instream.close();
try {
instream.seek(0);
} catch (SwiftConnectionClosedException e) {
//expected a closed file
}
try {
instream.read();
} catch (IOException e) {
//expected a closed file
}
try {
byte[] buffer = new byte[1];
int result = instream.read(buffer, 0, 1);
} catch (IOException e) {
//expected a closed file
}
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testNegativeSeek() throws Throwable {
instream = fs.open(smallSeekFile);
assertEquals(0, instream.getPos());
try {
instream.seek(-1);
long p = instream.getPos();
LOG.warn("Seek to -1 returned a position of " + p);
int result = instream.read();
fail(
"expected an exception, got data " + result + " at a position of " + p);
} catch (IOException e) {
//bad seek -expected
}
assertEquals(0, instream.getPos());
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testSeekFile() throws Throwable {
instream = fs.open(smallSeekFile);
assertEquals(0, instream.getPos());
//expect that seek to 0 works
instream.seek(0);
int result = instream.read();
assertEquals(0, result);
assertEquals(1, instream.read());
assertEquals(2, instream.getPos());
assertEquals(2, instream.read());
assertEquals(3, instream.getPos());
instream.seek(128);
assertEquals(128, instream.getPos());
assertEquals(128, instream.read());
instream.seek(63);
assertEquals(63, instream.read());
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testSeekAndReadPastEndOfFile() throws Throwable {
instream = fs.open(smallSeekFile);
assertEquals(0, instream.getPos());
//expect that seek to 0 works
//go just before the end
instream.seek(SMALL_SEEK_FILE_LEN - 2);
assertTrue("Premature EOF", instream.read() != -1);
assertTrue("Premature EOF", instream.read() != -1);
assertMinusOne("read past end of file", instream.read());
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testSeekAndPastEndOfFileThenReseekAndRead() throws Throwable {
instream = fs.open(smallSeekFile);
    //seek to the end of the file. This may or may not fail; the failure may
    //be delayed until the read
try {
instream.seek(SMALL_SEEK_FILE_LEN);
//if this doesn't trigger, then read() is expected to fail
assertMinusOne("read after seeking past EOF", instream.read());
} catch (EOFException expected) {
//here an exception was raised in seek
}
instream.seek(1);
assertTrue("Premature EOF", instream.read() != -1);
}
@Override
protected Configuration createConfiguration() {
Configuration conf = super.createConfiguration();
conf.set(SwiftProtocolConstants.SWIFT_REQUEST_SIZE, "1");
return conf;
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testSeekBigFile() throws Throwable {
Path testSeekFile = new Path(testPath, "bigseekfile.txt");
byte[] block = SwiftTestUtils.dataset(65536, 0, 255);
createFile(testSeekFile, block);
instream = fs.open(testSeekFile);
assertEquals(0, instream.getPos());
//expect that seek to 0 works
instream.seek(0);
int result = instream.read();
assertEquals(0, result);
assertEquals(1, instream.read());
assertEquals(2, instream.read());
//do seek 32KB ahead
instream.seek(32768);
assertEquals("@32768", block[32768], (byte) instream.read());
instream.seek(40000);
assertEquals("@40000", block[40000], (byte) instream.read());
instream.seek(8191);
assertEquals("@8191", block[8191], (byte) instream.read());
instream.seek(0);
assertEquals("@0", 0, (byte) instream.read());
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testPositionedBulkReadDoesntChangePosition() throws Throwable {
Path testSeekFile = new Path(testPath, "bigseekfile.txt");
byte[] block = SwiftTestUtils.dataset(65536, 0, 255);
createFile(testSeekFile, block);
instream = fs.open(testSeekFile);
instream.seek(39999);
assertTrue(-1 != instream.read());
assertEquals (40000, instream.getPos());
byte[] readBuffer = new byte[256];
instream.read(128, readBuffer, 0, readBuffer.length);
//have gone back
assertEquals(40000, instream.getPos());
//content is the same too
assertEquals("@40000", block[40000], (byte) instream.read());
//now verify the picked up data
for (int i = 0; i < 256; i++) {
assertEquals("@" + i, block[i + 128], readBuffer[i]);
}
}
/**
* work out the expected byte from a specific offset
* @param offset offset in the file
* @return the value
*/
int expectedByte(int offset) {
return offset & 0xff;
}
}
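// ---------------------------------------------------------------------------
// Editor's note: the seek contract exercised above, in miniature. A sketch,
// assuming an already-opened FSDataInputStream "in" over an N-byte file:
//
//   in.seek(0);             // always legal, even on a zero-byte file
//   in.seek(N - 1);         // last byte: read() returns data, not -1
//   in.read();              // position is now N; the next read() returns -1
//   in.seek(N); in.read();  // either seek throws EOFException or read() == -1
//   in.seek(-1);            // raises an IOException; position stays unchanged
// ---------------------------------------------------------------------------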
| 8,772 | 32.613027 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemLsOperations.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
import java.io.IOException;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.assertListStatusFinds;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.cleanup;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.dumpStats;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.touch;
/**
* Test the FileSystem#listStatus() operations
*/
public class TestSwiftFileSystemLsOperations extends SwiftFileSystemBaseTest {
private Path[] testDirs;
/**
* Setup creates dirs under test/hadoop
*
* @throws Exception
*/
@Override
public void setUp() throws Exception {
super.setUp();
//delete the test directory
Path test = path("/test");
fs.delete(test, true);
mkdirs(test);
}
/**
* Create subdirectories and files under test/ for those tests
* that want them. Doing so adds overhead to setup and teardown,
* so should only be done for those tests that need them.
* @throws IOException on an IO problem
*/
private void createTestSubdirs() throws IOException {
testDirs = new Path[]{
path("/test/hadoop/a"),
path("/test/hadoop/b"),
path("/test/hadoop/c/1"),
};
assertPathDoesNotExist("test directory setup", testDirs[0]);
for (Path path : testDirs) {
mkdirs(path);
}
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testListLevelTest() throws Exception {
createTestSubdirs();
FileStatus[] paths = fs.listStatus(path("/test"));
assertEquals(dumpStats("/test", paths), 1, paths.length);
assertEquals(path("/test/hadoop"), paths[0].getPath());
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testListLevelTestHadoop() throws Exception {
createTestSubdirs();
FileStatus[] paths;
paths = fs.listStatus(path("/test/hadoop"));
String stats = dumpStats("/test/hadoop", paths);
assertEquals("Paths.length wrong in " + stats, 3, paths.length);
assertEquals("Path element[0] wrong: " + stats, path("/test/hadoop/a"),
paths[0].getPath());
assertEquals("Path element[1] wrong: " + stats, path("/test/hadoop/b"),
paths[1].getPath());
assertEquals("Path element[2] wrong: " + stats, path("/test/hadoop/c"),
paths[2].getPath());
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testListStatusEmptyDirectory() throws Exception {
createTestSubdirs();
FileStatus[] paths;
paths = fs.listStatus(path("/test/hadoop/a"));
assertEquals(dumpStats("/test/hadoop/a", paths), 0,
paths.length);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testListStatusFile() throws Exception {
describe("Create a single file under /test;" +
" assert that listStatus(/test) finds it");
Path file = path("/test/filename");
createFile(file);
FileStatus[] pathStats = fs.listStatus(file);
assertEquals(dumpStats("/test/", pathStats),
1,
pathStats.length);
//and assert that the len of that ls'd path is the same as the original
FileStatus lsStat = pathStats[0];
assertEquals("Wrong file len in listing of " + lsStat,
data.length, lsStat.getLen());
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testListEmptyRoot() throws Throwable {
describe("Empty the root dir and verify that an LS / returns {}");
cleanup("testListEmptyRoot", fs, "/test");
cleanup("testListEmptyRoot", fs, "/user");
FileStatus[] fileStatuses = fs.listStatus(path("/"));
assertEquals("Non-empty root" + dumpStats("/", fileStatuses),
0,
fileStatuses.length);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testListNonEmptyRoot() throws Throwable {
Path test = path("/test");
touch(fs, test);
FileStatus[] fileStatuses = fs.listStatus(path("/"));
String stats = dumpStats("/", fileStatuses);
assertEquals("Wrong #of root children" + stats, 1, fileStatuses.length);
FileStatus status = fileStatuses[0];
assertEquals("Wrong path value" + stats,test, status.getPath());
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testListStatusRootDir() throws Throwable {
Path dir = path("/");
Path child = path("/test");
touch(fs, child);
assertListStatusFinds(fs, dir, child);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testListStatusFiltered() throws Throwable {
Path dir = path("/");
Path child = path("/test");
touch(fs, child);
FileStatus[] stats = fs.listStatus(dir, new AcceptAllFilter());
boolean found = false;
StringBuilder builder = new StringBuilder();
for (FileStatus stat : stats) {
builder.append(stat.toString()).append('\n');
if (stat.getPath().equals(child)) {
found = true;
}
}
assertTrue("Path " + child
+ " not found in directory " + dir + ":" + builder,
found);
}
}
| 5,924 | 33.852941 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemPartitionedUploads.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import org.apache.commons.httpclient.Header;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.http.SwiftProtocolConstants;
import org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem;
import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
import org.apache.hadoop.fs.swift.util.SwiftUtils;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;
import org.junit.internal.AssumptionViolatedException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.assertPathExists;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.readDataset;
/**
* Test partitioned uploads.
* This is done by forcing a very small partition size and verifying that it
* is picked up.
*/
public class TestSwiftFileSystemPartitionedUploads extends
SwiftFileSystemBaseTest {
public static final String WRONG_PARTITION_COUNT =
"wrong number of partitions written into ";
public static final int PART_SIZE = 1;
public static final int PART_SIZE_BYTES = PART_SIZE * 1024;
public static final int BLOCK_SIZE = 1024;
private URI uri;
@Override
protected Configuration createConfiguration() {
Configuration conf = super.createConfiguration();
//set the partition size to 1 KB
conf.setInt(SwiftProtocolConstants.SWIFT_PARTITION_SIZE, PART_SIZE);
return conf;
}
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testPartitionPropertyPropagatesToConf() throws Throwable {
assertEquals(1,
getConf().getInt(SwiftProtocolConstants.SWIFT_PARTITION_SIZE,
0));
}
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
  public void testPartitionPropertyPropagatesToStore() throws Throwable {
assertEquals(1, fs.getStore().getPartsizeKB());
}
/**
 * Tests the partitioned-upload path used for big (> 5GB) files,
 * simulated here by forcing a small partition size.
*/
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testFilePartUpload() throws Throwable {
final Path path = new Path("/test/testFilePartUpload");
int len = 8192;
final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
FSDataOutputStream out = fs.create(path,
false,
getBufferSize(),
(short) 1,
BLOCK_SIZE);
try {
int totalPartitionsToWrite = len / PART_SIZE_BYTES;
assertPartitionsWritten("Startup", out, 0);
//write 2048
int firstWriteLen = 2048;
out.write(src, 0, firstWriteLen);
//assert
long expected = getExpectedPartitionsWritten(firstWriteLen,
PART_SIZE_BYTES,
false);
SwiftUtils.debug(LOG, "First write: predict %d partitions written",
expected);
assertPartitionsWritten("First write completed", out, expected);
//write the rest
int remainder = len - firstWriteLen;
SwiftUtils.debug(LOG, "remainder: writing: %d bytes", remainder);
out.write(src, firstWriteLen, remainder);
expected =
getExpectedPartitionsWritten(len, PART_SIZE_BYTES, false);
assertPartitionsWritten("Remaining data", out, expected);
out.close();
expected =
getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
assertPartitionsWritten("Stream closed", out, expected);
Header[] headers = fs.getStore().getObjectHeaders(path, true);
for (Header header : headers) {
LOG.info(header.toString());
}
byte[] dest = readDataset(fs, path, len);
LOG.info("Read dataset from " + path + ": data length =" + len);
//compare data
SwiftTestUtils.compareByteArrays(src, dest, len);
FileStatus status;
final Path qualifiedPath = path.makeQualified(fs);
status = fs.getFileStatus(qualifiedPath);
//now see what block location info comes back.
//This will vary depending on the Swift version, so the results
//aren't checked -merely that the test actually worked
BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
assertNotNull("Null getFileBlockLocations()", locations);
assertTrue("empty array returned for getFileBlockLocations()",
locations.length > 0);
      //last bit of the test -this seems to play up on partitioned files,
      //so an assertion failure here is downgraded to a skip
try {
validatePathLen(path, len);
} catch (AssertionError e) {
//downgrade to a skip
throw new AssumptionViolatedException(e, null);
}
} finally {
IOUtils.closeStream(out);
}
}
/**
 * Tests the same partitioned-upload path as above, but without the
 * strict path-length validation at the end.
*/
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testFilePartUploadNoLengthCheck() throws IOException, URISyntaxException {
final Path path = new Path("/test/testFilePartUploadLengthCheck");
int len = 8192;
final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
FSDataOutputStream out = fs.create(path,
false,
getBufferSize(),
(short) 1,
BLOCK_SIZE);
try {
int totalPartitionsToWrite = len / PART_SIZE_BYTES;
assertPartitionsWritten("Startup", out, 0);
//write 2048
int firstWriteLen = 2048;
out.write(src, 0, firstWriteLen);
//assert
long expected = getExpectedPartitionsWritten(firstWriteLen,
PART_SIZE_BYTES,
false);
SwiftUtils.debug(LOG, "First write: predict %d partitions written",
expected);
assertPartitionsWritten("First write completed", out, expected);
//write the rest
int remainder = len - firstWriteLen;
SwiftUtils.debug(LOG, "remainder: writing: %d bytes", remainder);
out.write(src, firstWriteLen, remainder);
expected =
getExpectedPartitionsWritten(len, PART_SIZE_BYTES, false);
assertPartitionsWritten("Remaining data", out, expected);
out.close();
expected =
getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
assertPartitionsWritten("Stream closed", out, expected);
Header[] headers = fs.getStore().getObjectHeaders(path, true);
for (Header header : headers) {
LOG.info(header.toString());
}
byte[] dest = readDataset(fs, path, len);
LOG.info("Read dataset from " + path + ": data length =" + len);
//compare data
SwiftTestUtils.compareByteArrays(src, dest, len);
FileStatus status = fs.getFileStatus(path);
//now see what block location info comes back.
//This will vary depending on the Swift version, so the results
//aren't checked -merely that the test actually worked
BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
assertNotNull("Null getFileBlockLocations()", locations);
assertTrue("empty array returned for getFileBlockLocations()",
locations.length > 0);
} finally {
IOUtils.closeStream(out);
}
}
private FileStatus validatePathLen(Path path, int len) throws IOException {
//verify that the length is what was written in a direct status check
final Path qualifiedPath = path.makeQualified(fs);
FileStatus[] parentDirListing = fs.listStatus(qualifiedPath.getParent());
StringBuilder listing = lsToString(parentDirListing);
String parentDirLS = listing.toString();
FileStatus status = fs.getFileStatus(qualifiedPath);
assertEquals("Length of written file " + qualifiedPath
+ " from status check " + status
+ " in dir " + listing,
len,
status.getLen());
String fileInfo = qualifiedPath + " " + status;
assertFalse("File claims to be a directory " + fileInfo,
status.isDir());
FileStatus listedFileStat = resolveChild(parentDirListing, qualifiedPath);
assertNotNull("Did not find " + path + " in " + parentDirLS,
listedFileStat);
    //file is in the parent dir. Now validate its stats
assertEquals("Wrong len for " + path + " in listing " + parentDirLS,
len,
listedFileStat.getLen());
return status;
}
private FileStatus resolveChild(FileStatus[] parentDirListing,
Path childPath) {
FileStatus listedFileStat = null;
for (FileStatus stat : parentDirListing) {
if (stat.getPath().equals(childPath)) {
listedFileStat = stat;
}
}
return listedFileStat;
}
private StringBuilder lsToString(FileStatus[] parentDirListing) {
StringBuilder listing = new StringBuilder();
for (FileStatus stat : parentDirListing) {
listing.append(stat).append("\n");
}
return listing;
}
/**
* Calculate the #of partitions expected from the upload
* @param uploaded number of bytes uploaded
* @param partSizeBytes the partition size
* @param closed whether or not the stream has closed
* @return the expected number of partitions, for use in assertions.
*/
private int getExpectedPartitionsWritten(long uploaded,
int partSizeBytes,
boolean closed) {
//#of partitions in total
int partitions = (int) (uploaded / partSizeBytes);
//#of bytes past the last partition
int remainder = (int) (uploaded % partSizeBytes);
if (closed) {
      //all data is written, so any remainder is flushed up as one final
      //partition
      return partitions + ((remainder > 0) ? 1 : 0);
    } else {
      //not closed: the remainder is still buffered, so only full
      //partitions count
return partitions;
}
}
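  // Worked example (editor's note): with partSizeBytes = 1024, writing 2048
  // bytes to an open stream gives 2048 / 1024 = 2 partitions and no
  // remainder; writing 8192 bytes and closing gives 8192 / 1024 = 8. An
  // 8300-byte upload would report 8 full partitions while open, and
  // 8 + 1 = 9 once close() flushes the 108-byte remainder.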
private int getBufferSize() {
return fs.getConf().getInt("io.file.buffer.size", 4096);
}
/**
* Test sticks up a very large partitioned file and verifies that
* it comes back unchanged.
* @throws Throwable
*/
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testManyPartitionedFile() throws Throwable {
final Path path = new Path("/test/testManyPartitionedFile");
int len = PART_SIZE_BYTES * 15;
final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
FSDataOutputStream out = fs.create(path,
false,
getBufferSize(),
(short) 1,
BLOCK_SIZE);
out.write(src, 0, src.length);
int expected =
getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
out.close();
assertPartitionsWritten("write completed", out, expected);
assertEquals("too few bytes written", len,
SwiftNativeFileSystem.getBytesWritten(out));
assertEquals("too few bytes uploaded", len,
SwiftNativeFileSystem.getBytesUploaded(out));
//now we verify that the data comes back. If it
//doesn't, it means that the ordering of the partitions
//isn't right
byte[] dest = readDataset(fs, path, len);
//compare data
SwiftTestUtils.compareByteArrays(src, dest, len);
//finally, check the data
FileStatus[] stats = fs.listStatus(path);
assertEquals("wrong entry count in "
+ SwiftTestUtils.dumpStats(path.toString(), stats),
expected, stats.length);
}
/**
* Test that when a partitioned file is overwritten by a smaller one,
* all the old partitioned files go away
* @throws Throwable
*/
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testOverwritePartitionedFile() throws Throwable {
final Path path = new Path("/test/testOverwritePartitionedFile");
final int len1 = 8192;
final byte[] src1 = SwiftTestUtils.dataset(len1, 'A', 'Z');
FSDataOutputStream out = fs.create(path,
false,
getBufferSize(),
(short) 1,
1024);
out.write(src1, 0, len1);
out.close();
    long expected = getExpectedPartitionsWritten(len1,
        PART_SIZE_BYTES,
        true);
assertPartitionsWritten("initial upload", out, expected);
assertExists("Exists", path);
FileStatus status = fs.getFileStatus(path);
assertEquals("Length", len1, status.getLen());
//now write a shorter file with a different dataset
final int len2 = 4095;
final byte[] src2 = SwiftTestUtils.dataset(len2, 'a', 'z');
out = fs.create(path,
true,
getBufferSize(),
(short) 1,
1024);
out.write(src2, 0, len2);
out.close();
status = fs.getFileStatus(path);
assertEquals("Length", len2, status.getLen());
byte[] dest = readDataset(fs, path, len2);
//compare data
SwiftTestUtils.compareByteArrays(src2, dest, len2);
}
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testDeleteSmallPartitionedFile() throws Throwable {
final Path path = new Path("/test/testDeleteSmallPartitionedFile");
final int len1 = 1024;
final byte[] src1 = SwiftTestUtils.dataset(len1, 'A', 'Z');
SwiftTestUtils.writeDataset(fs, path, src1, len1, 1024, false);
assertExists("Exists", path);
Path part_0001 = new Path(path, SwiftUtils.partitionFilenameFromNumber(1));
Path part_0002 = new Path(path, SwiftUtils.partitionFilenameFromNumber(2));
String ls = SwiftTestUtils.ls(fs, path);
assertExists("Partition 0001 Exists in " + ls, part_0001);
assertPathDoesNotExist("partition 0002 found under " + ls, part_0002);
assertExists("Partition 0002 Exists in " + ls, part_0001);
fs.delete(path, false);
assertPathDoesNotExist("deleted file still there", path);
ls = SwiftTestUtils.ls(fs, path);
assertPathDoesNotExist("partition 0001 file still under " + ls, part_0001);
}
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testDeletePartitionedFile() throws Throwable {
final Path path = new Path("/test/testDeletePartitionedFile");
SwiftTestUtils.writeDataset(fs, path, data, data.length, 1024, false);
assertExists("Exists", path);
Path part_0001 = new Path(path, SwiftUtils.partitionFilenameFromNumber(1));
Path part_0002 = new Path(path, SwiftUtils.partitionFilenameFromNumber(2));
String ls = SwiftTestUtils.ls(fs, path);
assertExists("Partition 0001 Exists in " + ls, part_0001);
assertExists("Partition 0002 Exists in " + ls, part_0001);
fs.delete(path, false);
assertPathDoesNotExist("deleted file still there", path);
ls = SwiftTestUtils.ls(fs, path);
assertPathDoesNotExist("partition 0001 file still under " + ls, part_0001);
assertPathDoesNotExist("partition 0002 file still under " + ls, part_0002);
}
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testRenamePartitionedFile() throws Throwable {
Path src = new Path("/test/testRenamePartitionedFileSrc");
int len = data.length;
SwiftTestUtils.writeDataset(fs, src, data, len, 1024, false);
assertExists("Exists", src);
String partOneName = SwiftUtils.partitionFilenameFromNumber(1);
Path srcPart = new Path(src, partOneName);
Path dest = new Path("/test/testRenamePartitionedFileDest");
    Path destPart = new Path(dest, partOneName);
assertExists("Partition Exists", srcPart);
fs.rename(src, dest);
assertPathExists(fs, "dest file missing", dest);
FileStatus status = fs.getFileStatus(dest);
assertEquals("Length of renamed file is wrong", len, status.getLen());
byte[] destData = readDataset(fs, dest, len);
//compare data
SwiftTestUtils.compareByteArrays(data, destData, len);
String srcLs = SwiftTestUtils.ls(fs, src);
String destLs = SwiftTestUtils.ls(fs, dest);
assertPathDoesNotExist("deleted file still found in " + srcLs, src);
assertPathDoesNotExist("partition file still found in " + srcLs, srcPart);
}
}
| 17,465 | 38.426637 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemBasicOps.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.exceptions.SwiftBadRequestException;
import org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem;
import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
import org.junit.Test;
import java.io.FileNotFoundException;
import java.io.IOException;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.assertFileHasLength;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.assertIsDirectory;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.readBytesToString;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.writeTextFile;
/**
* Test basic filesystem operations.
* Many of these are similar to those in {@link TestSwiftFileSystemContract}
* -this is a JUnit4 test suite used to initially test the Swift
* component. Once written, there's no reason not to retain these tests.
*/
public class TestSwiftFileSystemBasicOps extends SwiftFileSystemBaseTest {
private static final Log LOG =
LogFactory.getLog(TestSwiftFileSystemBasicOps.class);
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testLsRoot() throws Throwable {
Path path = new Path("/");
    //the listing itself is the test: it must succeed without throwing
    FileStatus[] statuses = fs.listStatus(path);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testMkDir() throws Throwable {
Path path = new Path("/test/MkDir");
fs.mkdirs(path);
    //mkdirs succeeded -so try a recursive delete
fs.delete(path, true);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testDeleteNonexistentFile() throws Throwable {
Path path = new Path("/test/DeleteNonexistentFile");
assertFalse("delete returned true", fs.delete(path, false));
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testPutFile() throws Throwable {
Path path = new Path("/test/PutFile");
writeTextFile(fs, path, "Testing a put to a file", false);
assertDeleted(path, false);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testPutGetFile() throws Throwable {
Path path = new Path("/test/PutGetFile");
try {
String text = "Testing a put and get to a file "
+ System.currentTimeMillis();
writeTextFile(fs, path, text, false);
String result = readBytesToString(fs, path, text.length());
assertEquals(text, result);
} finally {
delete(fs, path);
}
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testPutDeleteFileInSubdir() throws Throwable {
Path path =
new Path("/test/PutDeleteFileInSubdir/testPutDeleteFileInSubdir");
String text = "Testing a put and get to a file in a subdir "
+ System.currentTimeMillis();
writeTextFile(fs, path, text, false);
assertDeleted(path, false);
//now delete the parent that should have no children
assertDeleted(new Path("/test/PutDeleteFileInSubdir"), false);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRecursiveDelete() throws Throwable {
Path childpath =
new Path("/test/testRecursiveDelete");
String text = "Testing a put and get to a file in a subdir "
+ System.currentTimeMillis();
writeTextFile(fs, childpath, text, false);
//now delete the parent that should have no children
assertDeleted(new Path("/test"), true);
assertFalse("child entry still present " + childpath, fs.exists(childpath));
}
private void delete(SwiftNativeFileSystem fs, Path path) {
try {
if (!fs.delete(path, false)) {
LOG.warn("Failed to delete " + path);
}
} catch (IOException e) {
LOG.warn("deleting " + path, e);
}
}
private void deleteR(SwiftNativeFileSystem fs, Path path) {
try {
if (!fs.delete(path, true)) {
LOG.warn("Failed to delete " + path);
}
} catch (IOException e) {
LOG.warn("deleting " + path, e);
}
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testOverwrite() throws Throwable {
Path path = new Path("/test/Overwrite");
try {
String text = "Testing a put to a file "
+ System.currentTimeMillis();
writeTextFile(fs, path, text, false);
assertFileHasLength(fs, path, text.length());
String text2 = "Overwriting a file "
+ System.currentTimeMillis();
writeTextFile(fs, path, text2, true);
assertFileHasLength(fs, path, text2.length());
String result = readBytesToString(fs, path, text2.length());
assertEquals(text2, result);
} finally {
delete(fs, path);
}
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testOverwriteDirectory() throws Throwable {
Path path = new Path("/test/testOverwriteDirectory");
try {
fs.mkdirs(path.getParent());
String text = "Testing a put to a file "
+ System.currentTimeMillis();
writeTextFile(fs, path, text, false);
assertFileHasLength(fs, path, text.length());
} finally {
delete(fs, path);
}
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testFileStatus() throws Throwable {
Path path = new Path("/test/FileStatus");
try {
String text = "Testing File Status "
+ System.currentTimeMillis();
writeTextFile(fs, path, text, false);
SwiftTestUtils.assertIsFile(fs, path);
} finally {
delete(fs, path);
}
}
/**
* Assert that a newly created directory is a directory
*
* @throws Throwable if not, or if something else failed
*/
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testDirStatus() throws Throwable {
Path path = new Path("/test/DirStatus");
try {
fs.mkdirs(path);
assertIsDirectory(fs, path);
} finally {
delete(fs, path);
}
}
/**
* Assert that if a directory that has children is deleted, it is still
* a directory
*
* @throws Throwable if not, or if something else failed
*/
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testDirStaysADir() throws Throwable {
Path path = new Path("/test/dirStaysADir");
Path child = new Path(path, "child");
try {
//create the dir
fs.mkdirs(path);
//assert the parent has the directory nature
assertIsDirectory(fs, path);
//create the child dir
writeTextFile(fs, child, "child file", true);
//assert the parent has the directory nature
assertIsDirectory(fs, path);
//now rm the child
delete(fs, child);
} finally {
deleteR(fs, path);
}
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testCreateMultilevelDir() throws Throwable {
Path base = new Path("/test/CreateMultilevelDir");
Path path = new Path(base, "1/2/3");
fs.mkdirs(path);
assertExists("deep multilevel dir not created", path);
fs.delete(base, true);
assertPathDoesNotExist("Multilevel delete failed", path);
assertPathDoesNotExist("Multilevel delete failed", base);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testCreateDirWithFileParent() throws Throwable {
Path path = new Path("/test/CreateDirWithFileParent");
Path child = new Path(path, "subdir/child");
fs.mkdirs(path.getParent());
try {
//create the child dir
writeTextFile(fs, path, "parent", true);
try {
fs.mkdirs(child);
} catch (ParentNotDirectoryException expected) {
LOG.debug("Expected Exception", expected);
}
} finally {
fs.delete(path, true);
}
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testLongObjectNamesForbidden() throws Throwable {
StringBuilder buffer = new StringBuilder(1200);
buffer.append("/");
for (int i = 0; i < (1200 / 4); i++) {
buffer.append(String.format("%04x", i));
}
String pathString = buffer.toString();
Path path = new Path(pathString);
try {
writeTextFile(fs, path, pathString, true);
      //if we get here, the create unexpectedly succeeded: clean up and fail
fs.delete(path, false);
fail("Managed to create an object with a name of length "
+ pathString.length());
} catch (SwiftBadRequestException e) {
//expected
//LOG.debug("Caught exception " + e, e);
}
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testLsNonExistentFile() throws Exception {
try {
Path path = new Path("/test/hadoop/file");
FileStatus[] statuses = fs.listStatus(path);
fail("Should throw FileNotFoundException on " + path
+ " but got list of length " + statuses.length);
} catch (FileNotFoundException fnfe) {
// expected
}
}
}
| 9,602 | 32.113793 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemContract.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import junit.framework.AssertionFailedError;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem;
import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
/**
* This is the full filesystem contract test -which requires the
* Default config set up to point to a filesystem.
*
* Some of the tests override the base class tests -these
* are where SwiftFS does not implement those features, or
* when the behavior of SwiftFS does not match the normal
* contract -which normally means that directories and equal files
* are being treated as equal.
*/
public class TestSwiftFileSystemContract
extends FileSystemContractBaseTest {
private static final Log LOG =
LogFactory.getLog(TestSwiftFileSystemContract.class);
  /**
   * Swift is not case sensitive, so the case detection/preservation
   * tests are disabled.
   * @return true if the case detection/preservation tests should run
   */
protected boolean filesystemIsCaseSensitive() {
return false;
}
@Override
protected void setUp() throws Exception {
final URI uri = getFilesystemURI();
final Configuration conf = new Configuration();
fs = createSwiftFS();
try {
fs.initialize(uri, conf);
} catch (IOException e) {
//FS init failed, set it to null so that teardown doesn't
//attempt to use it
fs = null;
throw e;
}
super.setUp();
}
protected URI getFilesystemURI() throws URISyntaxException, IOException {
return SwiftTestUtils.getServiceURI(new Configuration());
}
protected SwiftNativeFileSystem createSwiftFS() throws IOException {
SwiftNativeFileSystem swiftNativeFileSystem =
new SwiftNativeFileSystem();
return swiftNativeFileSystem;
}
@Override
public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
Path testDir = path("/test/hadoop");
assertFalse(fs.exists(testDir));
assertTrue(fs.mkdirs(testDir));
assertTrue(fs.exists(testDir));
Path filepath = path("/test/hadoop/file");
SwiftTestUtils.writeTextFile(fs, filepath, "hello, world", false);
Path testSubDir = new Path(filepath, "subdir");
SwiftTestUtils.assertPathDoesNotExist(fs, "subdir before mkdir", testSubDir);
try {
fs.mkdirs(testSubDir);
fail("Should throw IOException.");
} catch (ParentNotDirectoryException e) {
// expected
}
//now verify that the subdir path does not exist
SwiftTestUtils.assertPathDoesNotExist(fs, "subdir after mkdir", testSubDir);
Path testDeepSubDir = path("/test/hadoop/file/deep/sub/dir");
try {
fs.mkdirs(testDeepSubDir);
fail("Should throw IOException.");
} catch (ParentNotDirectoryException e) {
// expected
}
SwiftTestUtils.assertPathDoesNotExist(fs, "testDeepSubDir after mkdir",
testDeepSubDir);
}
@Override
public void testWriteReadAndDeleteEmptyFile() throws Exception {
try {
super.testWriteReadAndDeleteEmptyFile();
} catch (AssertionFailedError e) {
SwiftTestUtils.downgrade("empty files get mistaken for directories", e);
}
}
@Override
public void testMkdirsWithUmask() throws Exception {
//unsupported
}
  public void testZeroByteFilesAreFiles() throws Exception {
    //skipped: zero byte files get mistaken for directories in SwiftFS
    // SwiftTestUtils.unsupported("testZeroByteFilesAreFiles");
  }
}
| 4,555 | 32.5 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemRename.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.exceptions.SwiftOperationFailedException;
import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
import org.junit.Test;
import java.io.IOException;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.compareByteArrays;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.dataset;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.readBytesToString;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.readDataset;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.writeDataset;
public class TestSwiftFileSystemRename extends SwiftFileSystemBaseTest {
/**
* Rename a file into a directory
*
* @throws Exception
*/
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRenameFileIntoExistingDirectory() throws Exception {
assumeRenameSupported();
Path src = path("/test/olddir/file");
createFile(src);
Path dst = path("/test/new/newdir");
fs.mkdirs(dst);
rename(src, dst, true, false, true);
Path newFile = path("/test/new/newdir/file");
if (!fs.exists(newFile)) {
String ls = ls(dst);
LOG.info(ls(path("/test/new")));
LOG.info(ls(path("/test/hadoop")));
fail("did not find " + newFile + " - directory: " + ls);
}
assertTrue("Destination changed",
fs.exists(path("/test/new/newdir/file")));
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRenameFile() throws Exception {
assumeRenameSupported();
final Path old = new Path("/test/alice/file");
final Path newPath = new Path("/test/bob/file");
fs.mkdirs(newPath.getParent());
final FSDataOutputStream fsDataOutputStream = fs.create(old);
final byte[] message = "Some data".getBytes();
fsDataOutputStream.write(message);
fsDataOutputStream.close();
assertTrue(fs.exists(old));
rename(old, newPath, true, false, true);
final FSDataInputStream bobStream = fs.open(newPath);
final byte[] bytes = new byte[512];
final int read = bobStream.read(bytes);
bobStream.close();
final byte[] buffer = new byte[read];
System.arraycopy(bytes, 0, buffer, 0, read);
assertEquals(new String(message), new String(buffer));
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRenameDirectory() throws Exception {
assumeRenameSupported();
final Path old = new Path("/test/data/logs");
final Path newPath = new Path("/test/var/logs");
fs.mkdirs(old);
fs.mkdirs(newPath.getParent());
assertTrue(fs.exists(old));
rename(old, newPath, true, false, true);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRenameTheSameDirectory() throws Exception {
assumeRenameSupported();
final Path old = new Path("/test/usr/data");
fs.mkdirs(old);
rename(old, old, false, true, true);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRenameDirectoryIntoExistingDirectory() throws Exception {
assumeRenameSupported();
Path src = path("/test/olddir/dir");
fs.mkdirs(src);
createFile(path("/test/olddir/dir/file1"));
createFile(path("/test/olddir/dir/subdir/file2"));
Path dst = path("/test/new/newdir");
fs.mkdirs(dst);
//this renames into a child
rename(src, dst, true, false, true);
assertExists("new dir", path("/test/new/newdir/dir"));
assertExists("Renamed nested file1", path("/test/new/newdir/dir/file1"));
assertPathDoesNotExist("Nested file1 should have been deleted",
path("/test/olddir/dir/file1"));
assertExists("Renamed nested subdir",
path("/test/new/newdir/dir/subdir/"));
assertExists("file under subdir",
path("/test/new/newdir/dir/subdir/file2"));
assertPathDoesNotExist("Nested /test/hadoop/dir/subdir/file2 still exists",
path("/test/olddir/dir/subdir/file2"));
}
/**
* trying to rename a directory onto itself should fail,
* preserving everything underneath.
*/
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRenameDirToSelf() throws Throwable {
assumeRenameSupported();
Path parentdir = path("/test/parentdir");
fs.mkdirs(parentdir);
Path child = new Path(parentdir, "child");
createFile(child);
rename(parentdir, parentdir, false, true, true);
//verify the child is still there
assertIsFile(child);
}
/**
* Assert that root directory renames are not allowed
*
* @throws Exception on failures
*/
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRenameRootDirForbidden() throws Exception {
assumeRenameSupported();
rename(path("/"),
path("/test/newRootDir"),
false, true, false);
}
/**
* Assert that renaming a parent directory to be a child
* of itself is forbidden
*
* @throws Exception on failures
*/
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRenameChildDirForbidden() throws Exception {
assumeRenameSupported();
Path parentdir = path("/test/parentdir");
fs.mkdirs(parentdir);
Path childFile = new Path(parentdir, "childfile");
createFile(childFile);
//verify one level down
Path childdir = new Path(parentdir, "childdir");
rename(parentdir, childdir, false, true, false);
//now another level
fs.mkdirs(childdir);
Path childchilddir = new Path(childdir, "childdir");
rename(parentdir, childchilddir, false, true, false);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRenameFileAndVerifyContents() throws IOException {
assumeRenameSupported();
final Path filePath = new Path("/test/home/user/documents/file.txt");
final Path newFilePath = new Path("/test/home/user/files/file.txt");
mkdirs(newFilePath.getParent());
int len = 1024;
byte[] dataset = dataset(len, 'A', 26);
writeDataset(fs, filePath, dataset, len, len, false);
rename(filePath, newFilePath, true, false, true);
byte[] dest = readDataset(fs, newFilePath, len);
compareByteArrays(dataset, dest, len);
String reread = readBytesToString(fs, newFilePath, 20);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testMoveFileUnderParent() throws Throwable {
if (!renameSupported()) return;
Path filepath = path("test/file");
createFile(filepath);
//HDFS expects rename src, src -> true
rename(filepath, filepath, true, true, true);
//verify the file is still there
assertIsFile(filepath);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testMoveDirUnderParent() throws Throwable {
if (!renameSupported()) {
return;
}
Path testdir = path("test/dir");
fs.mkdirs(testdir);
Path parent = testdir.getParent();
//the outcome here is ambiguous, so is not checked
try {
fs.rename(testdir, parent);
} catch (SwiftOperationFailedException e) {
// allowed
}
assertExists("Source directory has been deleted ", testdir);
}
/**
* trying to rename a file onto itself should succeed (it's a no-op)
*/
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRenameFileToSelf() throws Throwable {
if (!renameSupported()) return;
Path filepath = path("test/file");
createFile(filepath);
//HDFS expects rename src, src -> true
rename(filepath, filepath, true, true, true);
//verify the file is still there
assertIsFile(filepath);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRenamedConsistence() throws IOException {
assumeRenameSupported();
describe("verify that overwriting a file with new data doesn't impact" +
" the existing content");
final Path filePath = new Path("/test/home/user/documents/file.txt");
final Path newFilePath = new Path("/test/home/user/files/file.txt");
mkdirs(newFilePath.getParent());
int len = 1024;
byte[] dataset = dataset(len, 'A', 26);
byte[] dataset2 = dataset(len, 'a', 26);
writeDataset(fs, filePath, dataset, len, len, false);
rename(filePath, newFilePath, true, false, true);
SwiftTestUtils.writeAndRead(fs, filePath, dataset2, len, len, false, true);
byte[] dest = readDataset(fs, newFilePath, len);
compareByteArrays(dataset, dest, len);
String reread = readBytesToString(fs, newFilePath, 20);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRenameMissingFile() throws Throwable {
assumeRenameSupported();
Path path = path("/test/RenameMissingFile");
Path path2 = path("/test/RenameMissingFileDest");
mkdirs(path("test"));
rename(path, path2, false, false, false);
}
}
| 9,532 | 33.539855 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftObjectPath.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.http.RestClientBindings;
import org.apache.hadoop.fs.swift.http.SwiftRestClient;
import org.apache.hadoop.fs.swift.util.SwiftObjectPath;
import org.apache.hadoop.fs.swift.util.SwiftUtils;
import org.junit.Test;
import java.net.URI;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
/**
* Unit tests for SwiftObjectPath class.
*/
public class TestSwiftObjectPath implements SwiftTestConstants {
private static final Log LOG = LogFactory.getLog(TestSwiftObjectPath.class);
/**
* What an endpoint looks like. This is derived from a (valid)
* rackspace endpoint address
*/
private static final String ENDPOINT =
"https://storage101.region1.example.org/v1/MossoCloudFS_9fb40cc0-1234-5678-9abc-def000c9a66";
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testParsePath() throws Exception {
final String pathString = "/home/user/files/file1";
final Path path = new Path(pathString);
final URI uri = new URI("http://container.localhost");
final SwiftObjectPath expected = SwiftObjectPath.fromPath(uri, path);
final SwiftObjectPath actual = new SwiftObjectPath(
RestClientBindings.extractContainerName(uri),
pathString);
assertEquals(expected, actual);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testParseUrlPath() throws Exception {
final String pathString = "swift://container.service1/home/user/files/file1";
final URI uri = new URI(pathString);
final Path path = new Path(pathString);
final SwiftObjectPath expected = SwiftObjectPath.fromPath(uri, path);
final SwiftObjectPath actual = new SwiftObjectPath(
RestClientBindings.extractContainerName(uri),
"/home/user/files/file1");
assertEquals(expected, actual);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testHandleUrlAsPath() throws Exception {
final String hostPart = "swift://container.service1";
final String pathPart = "/home/user/files/file1";
final String uriString = hostPart + pathPart;
final SwiftObjectPath expected = new SwiftObjectPath(uriString, pathPart);
final SwiftObjectPath actual = new SwiftObjectPath(uriString, uriString);
assertEquals(expected, actual);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testParseAuthenticatedUrl() throws Exception {
final String pathString = "swift://container.service1/v2/AUTH_00345h34l93459y4/home/tom/documents/finance.docx";
final URI uri = new URI(pathString);
final Path path = new Path(pathString);
final SwiftObjectPath expected = SwiftObjectPath.fromPath(uri, path);
final SwiftObjectPath actual = new SwiftObjectPath(
RestClientBindings.extractContainerName(uri),
"/home/tom/documents/finance.docx");
assertEquals(expected, actual);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testConvertToPath() throws Throwable {
String initialpath = "/dir/file1";
Path ipath = new Path(initialpath);
SwiftObjectPath objectPath = SwiftObjectPath.fromPath(new URI(initialpath),
ipath);
URI endpoint = new URI(ENDPOINT);
URI uri = SwiftRestClient.pathToURI(objectPath, endpoint);
LOG.info("Inital Hadoop Path =" + initialpath);
LOG.info("Merged URI=" + uri);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRootDirProbeEmptyPath() throws Throwable {
SwiftObjectPath object=new SwiftObjectPath("container","");
assertTrue(SwiftUtils.isRootDir(object));
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRootDirProbeRootPath() throws Throwable {
SwiftObjectPath object=new SwiftObjectPath("container","/");
assertTrue(SwiftUtils.isRootDir(object));
}
private void assertParentOf(SwiftObjectPath p1, SwiftObjectPath p2) {
    assertTrue(p1.toString() + " is not a parent of " + p2,
        p1.isEqualToOrParentOf(p2));
}
private void assertNotParentOf(SwiftObjectPath p1, SwiftObjectPath p2) {
    assertFalse(p1.toString() + " is a parent of " + p2,
        p1.isEqualToOrParentOf(p2));
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testChildOfProbe() throws Throwable {
SwiftObjectPath parent = new SwiftObjectPath("container",
"/parent");
SwiftObjectPath parent2 = new SwiftObjectPath("container",
"/parent2");
SwiftObjectPath child = new SwiftObjectPath("container",
"/parent/child");
SwiftObjectPath sibling = new SwiftObjectPath("container",
"/parent/sibling");
SwiftObjectPath grandchild = new SwiftObjectPath("container",
"/parent/child/grandchild");
assertParentOf(parent, child);
assertParentOf(parent, grandchild);
assertParentOf(child, grandchild);
assertParentOf(parent, parent);
assertNotParentOf(child, parent);
assertParentOf(child, child);
assertNotParentOf(parent, parent2);
assertNotParentOf(grandchild, parent);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testChildOfRoot() throws Throwable {
SwiftObjectPath root = new SwiftObjectPath("container", "/");
SwiftObjectPath child = new SwiftObjectPath("container", "child");
SwiftObjectPath grandchild = new SwiftObjectPath("container",
"/child/grandchild");
assertParentOf(root, child);
assertParentOf(root, grandchild);
assertParentOf(child, grandchild);
assertParentOf(root, root);
assertNotParentOf(child, root);
assertParentOf(child, child);
assertNotParentOf(grandchild, root);
}
}
| 6,794 | 38.736842 | 116 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftConfig.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.swift.http.SwiftRestClient;
import org.junit.Assert;
import org.junit.Test;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.DOT_AUTH_URL;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.DOT_LOCATION_AWARE;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.DOT_PASSWORD;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.DOT_TENANT;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.DOT_USERNAME;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.SWIFT_BLOCKSIZE;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.SWIFT_CONNECTION_TIMEOUT;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.SWIFT_PARTITION_SIZE;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.SWIFT_PROXY_HOST_PROPERTY;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.SWIFT_PROXY_PORT_PROPERTY;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.SWIFT_RETRY_COUNT;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.SWIFT_SERVICE_PREFIX;
/**
* Test the swift service-specific configuration binding features
*/
public class TestSwiftConfig extends Assert {
public static final String SERVICE = "openstack";
@Test(expected = org.apache.hadoop.fs.swift.exceptions.SwiftConfigurationException.class)
public void testEmptyUrl() throws Exception {
final Configuration configuration = new Configuration();
set(configuration, DOT_TENANT, "tenant");
set(configuration, DOT_USERNAME, "username");
set(configuration, DOT_PASSWORD, "password");
mkInstance(configuration);
}
@Test
public void testEmptyTenant() throws Exception {
final Configuration configuration = new Configuration();
set(configuration, DOT_AUTH_URL, "http://localhost:8080");
set(configuration, DOT_USERNAME, "username");
set(configuration, DOT_PASSWORD, "password");
mkInstance(configuration);
}
@Test(expected = org.apache.hadoop.fs.swift.exceptions.SwiftConfigurationException.class)
public void testEmptyUsername() throws Exception {
final Configuration configuration = new Configuration();
set(configuration, DOT_AUTH_URL, "http://localhost:8080");
set(configuration, DOT_TENANT, "tenant");
set(configuration, DOT_PASSWORD, "password");
mkInstance(configuration);
}
@Test(expected = org.apache.hadoop.fs.swift.exceptions.SwiftConfigurationException.class)
public void testEmptyPassword() throws Exception {
final Configuration configuration = new Configuration();
set(configuration, DOT_AUTH_URL, "http://localhost:8080");
set(configuration, DOT_TENANT, "tenant");
set(configuration, DOT_USERNAME, "username");
mkInstance(configuration);
}
@Test
public void testGoodRetryCount() throws Exception {
final Configuration configuration = createCoreConfig();
configuration.set(SWIFT_RETRY_COUNT, "3");
mkInstance(configuration);
}
@Test(expected = org.apache.hadoop.fs.swift.exceptions.SwiftConfigurationException.class)
public void testBadRetryCount() throws Exception {
final Configuration configuration = createCoreConfig();
configuration.set(SWIFT_RETRY_COUNT, "three");
mkInstance(configuration);
}
@Test(expected = org.apache.hadoop.fs.swift.exceptions.SwiftConfigurationException.class)
public void testBadConnectTimeout() throws Exception {
final Configuration configuration = createCoreConfig();
configuration.set(SWIFT_CONNECTION_TIMEOUT, "three");
mkInstance(configuration);
}
@Test(expected = org.apache.hadoop.fs.swift.exceptions.SwiftConfigurationException.class)
public void testZeroBlocksize() throws Exception {
final Configuration configuration = createCoreConfig();
configuration.set(SWIFT_BLOCKSIZE, "0");
mkInstance(configuration);
}
@Test(expected = org.apache.hadoop.fs.swift.exceptions.SwiftConfigurationException.class)
public void testNegativeBlocksize() throws Exception {
final Configuration configuration = createCoreConfig();
configuration.set(SWIFT_BLOCKSIZE, "-1");
mkInstance(configuration);
}
@Test
public void testPositiveBlocksize() throws Exception {
final Configuration configuration = createCoreConfig();
int size = 127;
configuration.set(SWIFT_BLOCKSIZE, Integer.toString(size));
SwiftRestClient restClient = mkInstance(configuration);
assertEquals(size, restClient.getBlocksizeKB());
}
@Test
public void testLocationAwareTruePropagates() throws Exception {
final Configuration configuration = createCoreConfig();
set(configuration, DOT_LOCATION_AWARE, "true");
SwiftRestClient restClient = mkInstance(configuration);
assertTrue(restClient.isLocationAware());
}
@Test
public void testLocationAwareFalsePropagates() throws Exception {
final Configuration configuration = createCoreConfig();
set(configuration, DOT_LOCATION_AWARE, "false");
SwiftRestClient restClient = mkInstance(configuration);
assertFalse(restClient.isLocationAware());
}
@Test(expected = org.apache.hadoop.fs.swift.exceptions.SwiftConfigurationException.class)
public void testNegativePartsize() throws Exception {
final Configuration configuration = createCoreConfig();
configuration.set(SWIFT_PARTITION_SIZE, "-1");
SwiftRestClient restClient = mkInstance(configuration);
}
@Test
public void testPositivePartsize() throws Exception {
final Configuration configuration = createCoreConfig();
int size = 127;
configuration.set(SWIFT_PARTITION_SIZE, Integer.toString(size));
SwiftRestClient restClient = mkInstance(configuration);
assertEquals(size, restClient.getPartSizeKB());
}
@Test
public void testProxyData() throws Exception {
final Configuration configuration = createCoreConfig();
String proxy="web-proxy";
int port = 8088;
configuration.set(SWIFT_PROXY_HOST_PROPERTY, proxy);
configuration.set(SWIFT_PROXY_PORT_PROPERTY, Integer.toString(port));
SwiftRestClient restClient = mkInstance(configuration);
assertEquals(proxy, restClient.getProxyHost());
assertEquals(port, restClient.getProxyPort());
}
private Configuration createCoreConfig() {
final Configuration configuration = new Configuration();
set(configuration, DOT_AUTH_URL, "http://localhost:8080");
set(configuration, DOT_TENANT, "tenant");
set(configuration, DOT_USERNAME, "username");
set(configuration, DOT_PASSWORD, "password");
return configuration;
}
private void set(Configuration configuration, String field, String value) {
configuration.set(SWIFT_SERVICE_PREFIX + SERVICE + field, value);
}
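  // For illustration: with SERVICE = "openstack", a call such as
  // set(configuration, DOT_AUTH_URL, "http://localhost:8080") is assumed to
  // compose a key of the form SWIFT_SERVICE_PREFIX + "openstack" + DOT_AUTH_URL;
  // the concrete string values live in SwiftProtocolConstants.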
private SwiftRestClient mkInstance(Configuration configuration) throws
IOException,
URISyntaxException {
URI uri = new URI("swift://container.openstack/");
return SwiftRestClient.getInstance(uri, configuration);
}
}
| 7,992 | 39.989744 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemExtendedContract.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.http.RestClientBindings;
import org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem;
import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
public class TestSwiftFileSystemExtendedContract extends SwiftFileSystemBaseTest {
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testOpenNonExistingFile() throws IOException {
final Path p = new Path("/test/testOpenNonExistingFile");
//open it as a file, should get FileNotFoundException
try {
final FSDataInputStream in = fs.open(p);
in.close();
fail("didn't expect to get here");
} catch (FileNotFoundException fnfe) {
LOG.debug("Expected: " + fnfe, fnfe);
}
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testFilesystemHasURI() throws Throwable {
assertNotNull(fs.getUri());
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testCreateFile() throws Exception {
final Path f = new Path("/test/testCreateFile");
final FSDataOutputStream fsDataOutputStream = fs.create(f);
fsDataOutputStream.close();
assertExists("created file", f);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testWriteReadFile() throws Exception {
final Path f = new Path("/test/test");
final FSDataOutputStream fsDataOutputStream = fs.create(f);
final String message = "Test string";
fsDataOutputStream.write(message.getBytes());
fsDataOutputStream.close();
assertExists("created file", f);
FSDataInputStream open = null;
try {
open = fs.open(f);
final byte[] bytes = new byte[512];
final int read = open.read(bytes);
final byte[] buffer = new byte[read];
System.arraycopy(bytes, 0, buffer, 0, read);
assertEquals(message, new String(buffer));
} finally {
fs.delete(f, false);
IOUtils.closeStream(open);
}
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testConfDefinesFilesystem() throws Throwable {
Configuration conf = new Configuration();
SwiftTestUtils.getServiceURI(conf);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testConfIsValid() throws Throwable {
Configuration conf = new Configuration();
URI fsURI = SwiftTestUtils.getServiceURI(conf);
RestClientBindings.bind(fsURI, conf);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testGetSchemeImplemented() throws Throwable {
String scheme = fs.getScheme();
    assertEquals(SwiftNativeFileSystem.SWIFT, scheme);
}
/**
* Assert that a filesystem is case sensitive.
* This is done by creating a mixed-case filename and asserting that
* its lower case version is not there.
*
* @throws Exception failures
*/
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testFilesystemIsCaseSensitive() throws Exception {
String mixedCaseFilename = "/test/UPPER.TXT";
Path upper = path(mixedCaseFilename);
Path lower = path(StringUtils.toLowerCase(mixedCaseFilename));
assertFalse("File exists" + upper, fs.exists(upper));
assertFalse("File exists" + lower, fs.exists(lower));
FSDataOutputStream out = fs.create(upper);
out.writeUTF("UPPER");
out.close();
FileStatus upperStatus = fs.getFileStatus(upper);
assertExists("Original upper case file" + upper, upper);
//verify the lower-case version of the filename doesn't exist
assertPathDoesNotExist("lower case file", lower);
//now overwrite the lower case version of the filename with a
//new version.
out = fs.create(lower);
out.writeUTF("l");
out.close();
assertExists("lower case file", lower);
    //verify the length of the upper file hasn't changed
assertExists("Original upper case file " + upper, upper);
FileStatus newStatus = fs.getFileStatus(upper);
assertEquals("Expected status:" + upperStatus
+ " actual status " + newStatus,
upperStatus.getLen(),
newStatus.getLen());
}
}
| 5,183 | 35 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestLogResources.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Test;
import java.net.URL;
/**
* This test just debugs which log resources are being picked up
*/
public class TestLogResources implements SwiftTestConstants {
protected static final Log LOG =
LogFactory.getLog(TestLogResources.class);
private void printf(String format, Object... args) {
String msg = String.format(format, args);
System.out.printf(msg + "\n");
LOG.info(msg);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testWhichLog4JPropsFile() throws Throwable {
locateResource("log4j.properties");
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testWhichLog4JXMLFile() throws Throwable {
locateResource("log4j.XML");
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testCommonsLoggingProps() throws Throwable {
locateResource("commons-logging.properties");
}
private void locateResource(String resource) {
URL url = this.getClass().getClassLoader().getResource(resource);
if (url != null) {
printf("resource %s is at %s", resource, url);
} else {
printf("resource %s is not on the classpath", resource);
}
}
}
| 2,076 | 31.453125 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemRead.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
import java.io.EOFException;
import java.io.IOException;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.readBytesToString;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.writeTextFile;
/**
* Test filesystem read operations
*/
public class TestSwiftFileSystemRead extends SwiftFileSystemBaseTest {
/**
* Read past the end of a file: expect the operation to fail
* @throws IOException
*/
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testOverRead() throws IOException {
final String message = "message";
final Path filePath = new Path("/test/file.txt");
writeTextFile(fs, filePath, message, false);
try {
readBytesToString(fs, filePath, 20);
fail("expected an exception");
} catch (EOFException e) {
//expected
}
}
/**
* Read and write some JSON
* @throws IOException
*/
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRWJson() throws IOException {
final String message = "{" +
" 'json': { 'i':43, 'b':true}," +
" 's':'string'" +
"}";
final Path filePath = new Path("/test/file.json");
writeTextFile(fs, filePath, message, false);
String readJson = readBytesToString(fs, filePath, message.length());
    assertEquals(message, readJson);
//now find out where it is
FileStatus status = fs.getFileStatus(filePath);
BlockLocation[] locations = fs.getFileBlockLocations(status, 0, 10);
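    // exercised for side effects only; block location contents are validated
    // in TestSwiftFileSystemBlockLocation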
}
/**
* Read and write some XML
* @throws IOException
*/
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRWXML() throws IOException {
final String message = "<x>" +
" <json i='43' 'b'=true/>" +
" string" +
"</x>";
final Path filePath = new Path("/test/file.xml");
writeTextFile(fs, filePath, message, false);
String read = readBytesToString(fs, filePath, message.length());
    assertEquals(message, read);
}
}
| 3,036 | 30.968421 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemDelete.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
import org.junit.Test;
import java.io.IOException;
/**
* Test deletion operations
*/
public class TestSwiftFileSystemDelete extends SwiftFileSystemBaseTest {
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testDeleteEmptyFile() throws IOException {
final Path file = new Path("/test/testDeleteEmptyFile");
createEmptyFile(file);
SwiftTestUtils.noteAction("about to delete");
assertDeleted(file, true);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testDeleteEmptyFileTwice() throws IOException {
final Path file = new Path("/test/testDeleteEmptyFileTwice");
createEmptyFile(file);
assertDeleted(file, true);
SwiftTestUtils.noteAction("multiple creates, and deletes");
assertFalse("Delete returned true", fs.delete(file, false));
createEmptyFile(file);
assertDeleted(file, true);
assertFalse("Delete returned true", fs.delete(file, false));
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testDeleteNonEmptyFile() throws IOException {
final Path file = new Path("/test/testDeleteNonEmptyFile");
createFile(file);
assertDeleted(file, true);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testDeleteNonEmptyFileTwice() throws IOException {
final Path file = new Path("/test/testDeleteNonEmptyFileTwice");
createFile(file);
assertDeleted(file, true);
assertFalse("Delete returned true", fs.delete(file, false));
createFile(file);
assertDeleted(file, true);
assertFalse("Delete returned true", fs.delete(file, false));
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testDeleteTestDir() throws IOException {
final Path file = new Path("/test/");
fs.delete(file, true);
assertPathDoesNotExist("Test dir found", file);
}
/**
* Test recursive root directory deletion fails if there is an entry underneath
* @throws Throwable
*/
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRmRootDirRecursiveIsForbidden() throws Throwable {
Path root = path("/");
Path testFile = path("/test");
createFile(testFile);
assertTrue("rm(/) returned false", fs.delete(root, true));
assertExists("Root dir is missing", root);
assertPathDoesNotExist("test file not deleted", testFile);
}
}
| 3,204 | 34.21978 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemConcurrency.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
import org.junit.Test;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
/**
* Test Swift FS concurrency logic. This isn't a very accurate test,
* because it is hard to consistently generate race conditions.
* Consider it "best effort"
*/
public class TestSwiftFileSystemConcurrency extends SwiftFileSystemBaseTest {
protected static final Log LOG =
LogFactory.getLog(TestSwiftFileSystemConcurrency.class);
private Exception thread1Ex, thread2Ex;
public static final String TEST_RACE_CONDITION_ON_DELETE_DIR =
"/test/testraceconditionondirdeletetest";
/**
* test on concurrent file system changes
*/
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRaceConditionOnDirDeleteTest() throws Exception {
SwiftTestUtils.skip("Skipping unreliable test");
final String message = "message";
final Path fileToRead = new Path(
TEST_RACE_CONDITION_ON_DELETE_DIR +"/files/many-files/file");
final ExecutorService executorService = Executors.newFixedThreadPool(2);
fs.create(new Path(TEST_RACE_CONDITION_ON_DELETE_DIR +"/file/test/file1"));
fs.create(new Path(TEST_RACE_CONDITION_ON_DELETE_DIR + "/documents/doc1"));
fs.create(new Path(
TEST_RACE_CONDITION_ON_DELETE_DIR + "/pictures/picture"));
executorService.execute(new Runnable() {
@Override
public void run() {
try {
assertDeleted(new Path(TEST_RACE_CONDITION_ON_DELETE_DIR), true);
} catch (IOException e) {
LOG.warn("deletion thread:" + e, e);
thread1Ex = e;
throw new RuntimeException(e);
}
}
});
executorService.execute(new Runnable() {
@Override
public void run() {
try {
final FSDataOutputStream outputStream = fs.create(fileToRead);
outputStream.write(message.getBytes());
outputStream.close();
} catch (IOException e) {
LOG.warn("writer thread:" + e, e);
thread2Ex = e;
throw new RuntimeException(e);
}
}
});
    // request shutdown first so awaitTermination returns as soon as both
    // submitted tasks complete, rather than always waiting out the timeout
    executorService.shutdown();
    executorService.awaitTermination(1, TimeUnit.MINUTES);
if (thread1Ex != null) {
throw thread1Ex;
}
if (thread2Ex != null) {
throw thread2Ex;
}
try {
fs.open(fileToRead);
LOG.info("concurrency test failed to trigger a failure");
} catch (FileNotFoundException expected) {
}
}
}
| 3,593 | 32.90566 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/SwiftTestConstants.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
/**
* Hard coded constants for the test timeouts
*/
public interface SwiftTestConstants {
/**
* Timeout for swift tests: {@value}
*/
int SWIFT_TEST_TIMEOUT = 5 * 60 * 1000;
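  // (referenced throughout the suite as @Test(timeout = SWIFT_TEST_TIMEOUT))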
/**
* Timeout for tests performing bulk operations: {@value}
*/
int SWIFT_BULK_IO_TEST_TIMEOUT = 12 * 60 * 1000;
}
| 1,168 | 32.4 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemBlocksize.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
import org.junit.Test;
/**
* Tests that blocksize is never zero for a file, either in the FS default
* or the FileStatus value of a queried file
*/
public class TestSwiftFileSystemBlocksize extends SwiftFileSystemBaseTest {
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testDefaultBlocksizeNonZero() throws Throwable {
assertTrue("Zero default blocksize", 0L != getFs().getDefaultBlockSize());
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testDefaultBlocksizeRootPathNonZero() throws Throwable {
assertTrue("Zero default blocksize",
0L != getFs().getDefaultBlockSize(new Path("/")));
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testDefaultBlocksizeOtherPathNonZero() throws Throwable {
assertTrue("Zero default blocksize",
0L != getFs().getDefaultBlockSize(new Path("/test")));
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testBlocksizeNonZeroForFile() throws Throwable {
Path smallfile = new Path("/test/smallfile");
SwiftTestUtils.writeTextFile(fs, smallfile, "blocksize", true);
createFile(smallfile);
FileStatus status = getFs().getFileStatus(smallfile);
assertTrue("Zero blocksize in " + status,
status.getBlockSize() != 0L);
assertTrue("Zero replication in " + status,
status.getReplication() != 0L);
}
}
| 2,344 | 37.442623 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemDirectories.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.snative.SwiftFileStatus;
import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
import org.junit.Test;
import java.io.FileNotFoundException;
/**
* Test swift-specific directory logic.
 * This class is HDFS-1 compatible; it is designed to be subclassed by
 * something with HDFS2 extensions
*/
public class TestSwiftFileSystemDirectories extends SwiftFileSystemBaseTest {
/**
   * Asserts that a zero byte file has a status of directory and not
   * file or symlink
*
* @throws Exception on failures
*/
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testZeroByteFilesAreDirectories() throws Exception {
Path src = path("/test/testZeroByteFilesAreFiles");
//create a zero byte file
SwiftTestUtils.touch(fs, src);
SwiftTestUtils.assertIsDirectory(fs, src);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testNoStatusForMissingDirectories() throws Throwable {
Path missing = path("/test/testNoStatusForMissingDirectories");
assertPathDoesNotExist("leftover?", missing);
try {
FileStatus[] statuses = fs.listStatus(missing);
//not expected
fail("Expected a FileNotFoundException, got the status " + statuses);
} catch (FileNotFoundException expected) {
//expected
}
}
/**
* test that a dir off root has a listStatus() call that
* works as expected. and that when a child is added. it changes
*
* @throws Exception on failures
*/
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testDirectoriesOffRootHaveMatchingFileStatus() throws Exception {
Path test = path("/test");
fs.delete(test, true);
mkdirs(test);
assertExists("created test directory", test);
FileStatus[] statuses = fs.listStatus(test);
String statusString = statusToString(test.toString(), statuses);
assertEquals("Wrong number of elements in file status " + statusString, 0,
statuses.length);
Path src = path("/test/file");
//create a zero byte file
SwiftTestUtils.touch(fs, src);
//stat it
statuses = fs.listStatus(test);
statusString = statusToString(test.toString(), statuses);
assertEquals("Wrong number of elements in file status " + statusString, 1,
statuses.length);
SwiftFileStatus stat = (SwiftFileStatus) statuses[0];
assertTrue("isDir(): Not a directory: " + stat, stat.isDir());
extraStatusAssertions(stat);
}
/**
* test that a dir two levels down has a listStatus() call that
* works as expected.
*
* @throws Exception on failures
*/
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testDirectoriesLowerDownHaveMatchingFileStatus() throws Exception {
Path test = path("/test/testDirectoriesLowerDownHaveMatchingFileStatus");
fs.delete(test, true);
mkdirs(test);
assertExists("created test sub directory", test);
FileStatus[] statuses = fs.listStatus(test);
String statusString = statusToString(test.toString(), statuses);
assertEquals("Wrong number of elements in file status " + statusString,0,
statuses.length);
}
private String statusToString(String pathname,
FileStatus[] statuses) {
assertNotNull(statuses);
    return SwiftTestUtils.dumpStats(pathname, statuses);
}
/**
* method for subclasses to add extra assertions
* @param stat status to look at
*/
protected void extraStatusAssertions(SwiftFileStatus stat) {
}
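  // Illustrative only: a subclass with HDFS2 extensions might override this
  // to also assert stat.isDirectory(), complementing the HDFS-1 isDir()
  // probe used above.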
/**
* Asserts that a zero byte file has a status of file and not
* directory or symlink
*
* @throws Exception on failures
*/
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testMultiByteFilesAreFiles() throws Exception {
Path src = path("/test/testMultiByteFilesAreFiles");
SwiftTestUtils.writeTextFile(fs, src, "testMultiByteFilesAreFiles", false);
assertIsFile(src);
FileStatus status = fs.getFileStatus(src);
assertFalse(status.isDir());
}
}
| 4,891 | 33.450704 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemBlockLocation.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.http.SwiftProtocolConstants;
import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
import org.junit.Test;
import java.io.IOException;
/**
* Test block location logic.
* The endpoint may or may not be location-aware
*/
public class TestSwiftFileSystemBlockLocation extends SwiftFileSystemBaseTest {
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testLocateSingleFileBlocks() throws Throwable {
describe("verify that a file returns 1+ blocks");
FileStatus fileStatus = createFileAndGetStatus();
BlockLocation[] locations =
getFs().getFileBlockLocations(fileStatus, 0, 1);
assertNotEqual("No block locations supplied for " + fileStatus, 0,
locations.length);
for (BlockLocation location : locations) {
assertLocationValid(location);
}
}
private void assertLocationValid(BlockLocation location) throws
IOException {
LOG.info(location);
String[] hosts = location.getHosts();
String[] names = location.getNames();
assertNotEqual("No hosts supplied for " + location, 0, hosts.length);
//for every host, there's a name.
assertEquals("Unequal names and hosts in " + location,
hosts.length, names.length);
assertEquals(SwiftProtocolConstants.BLOCK_LOCATION,
location.getNames()[0]);
assertEquals(SwiftProtocolConstants.TOPOLOGY_PATH,
location.getTopologyPaths()[0]);
}
private FileStatus createFileAndGetStatus() throws IOException {
Path path = path("/test/locatedFile");
createFile(path);
return fs.getFileStatus(path);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testLocateNullStatus() throws Throwable {
describe("verify that a null filestatus maps to a null location array");
BlockLocation[] locations =
getFs().getFileBlockLocations((FileStatus) null, 0, 1);
assertNull(locations);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testLocateNegativeSeek() throws Throwable {
describe("verify that a negative offset is illegal");
try {
BlockLocation[] locations =
getFs().getFileBlockLocations(createFileAndGetStatus(),
-1,
1);
fail("Expected an exception, got " + locations.length + " locations");
} catch (IllegalArgumentException e) {
//expected
}
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testLocateNegativeLen() throws Throwable {
describe("verify that a negative length is illegal");
try {
BlockLocation[] locations =
getFs().getFileBlockLocations(createFileAndGetStatus(),
0,
-1);
fail("Expected an exception, got " + locations.length + " locations");
} catch (IllegalArgumentException e) {
//expected
}
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testLocateOutOfRangeLen() throws Throwable {
describe("overshooting the length is legal, as long as the" +
" origin location is valid");
BlockLocation[] locations =
getFs().getFileBlockLocations(createFileAndGetStatus(),
0,
data.length + 100);
assertNotNull(locations);
assertTrue(locations.length > 0);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testLocateOutOfRangeSrc() throws Throwable {
describe("Seeking out of the file length returns an empty array");
BlockLocation[] locations =
getFs().getFileBlockLocations(createFileAndGetStatus(),
data.length + 100,
1);
assertEmptyBlockLocations(locations);
}
private void assertEmptyBlockLocations(BlockLocation[] locations) {
assertNotNull(locations);
    if (locations.length != 0) {
fail("non empty locations[] with first entry of " + locations[0]);
}
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testLocateDirectory() throws Throwable {
describe("verify that locating a directory is an error");
createFile(path("/test/filename"));
FileStatus status = fs.getFileStatus(path("/test"));
LOG.info("Filesystem is " + fs + "; target is " + status);
SwiftTestUtils.assertIsDirectory(status);
BlockLocation[] locations;
locations = getFs().getFileBlockLocations(status,
0,
1);
assertEmptyBlockLocations(locations);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testLocateRootDirectory() throws Throwable {
describe("verify that locating the root directory is an error");
FileStatus status = fs.getFileStatus(path("/"));
SwiftTestUtils.assertIsDirectory(status);
BlockLocation[] locations;
locations = getFs().getFileBlockLocations(status,
0,
1);
assertEmptyBlockLocations(locations);
}
}
| 6,123 | 35.452381 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/SwiftFileSystemBaseTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.exceptions.SwiftOperationFailedException;
import org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem;
import org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystemStore;
import org.apache.hadoop.fs.swift.util.DurationStats;
import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.assertPathExists;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.cleanupInTeardown;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.getServiceURI;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.noteAction;
/**
* This is the base class for most of the Swift tests
*/
public class SwiftFileSystemBaseTest extends Assert implements
SwiftTestConstants {
protected static final Log LOG =
LogFactory.getLog(SwiftFileSystemBaseTest.class);
protected SwiftNativeFileSystem fs;
protected static SwiftNativeFileSystem lastFs;
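  /** Shared dataset: two blocks of generated test bytes (via SwiftTestUtils.dataset). */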
protected byte[] data = SwiftTestUtils.dataset(getBlockSize() * 2, 0, 255);
private Configuration conf;
@Before
public void setUp() throws Exception {
noteAction("setup");
final URI uri = getFilesystemURI();
conf = createConfiguration();
fs = createSwiftFS();
try {
fs.initialize(uri, conf);
} catch (IOException e) {
//FS init failed, set it to null so that teardown doesn't
//attempt to use it
fs = null;
throw e;
}
//remember the last FS
lastFs = fs;
noteAction("setup complete");
}
/**
* Configuration generator. May be overridden to inject
* some custom options
* @return a configuration with which to create FS instances
*/
protected Configuration createConfiguration() {
return new Configuration();
}
@After
public void tearDown() throws Exception {
cleanupInTeardown(fs, "/test");
}
@AfterClass
public static void classTearDown() throws Exception {
if (lastFs != null) {
List<DurationStats> statistics = lastFs.getOperationStatistics();
for (DurationStats stat : statistics) {
LOG.info(stat.toString());
}
}
}
/**
* Get the configuration used to set up the FS
* @return the configuration
*/
public Configuration getConf() {
return conf;
}
/**
* Describe the test, combining some logging with details
* for people reading the code
*
* @param description test description
*/
protected void describe(String description) {
noteAction(description);
}
protected URI getFilesystemURI() throws URISyntaxException, IOException {
return getServiceURI(createConfiguration());
}
protected SwiftNativeFileSystem createSwiftFS() throws IOException {
SwiftNativeFileSystem swiftNativeFileSystem =
new SwiftNativeFileSystem();
return swiftNativeFileSystem;
}
protected int getBlockSize() {
return 1024;
}
/**
* Is rename supported?
* @return true
*/
protected boolean renameSupported() {
return true;
}
/**
* assume in a test that rename is supported;
* skip it if not
*/
protected void assumeRenameSupported() {
Assume.assumeTrue(renameSupported());
}
/**
* Take an unqualified path, and qualify it w.r.t the
* current filesystem
* @param pathString source path
* @return a qualified path instance
*/
protected Path path(String pathString) {
return new Path(pathString).makeQualified(fs);
}
/**
* Get the filesystem
* @return the current FS
*/
public SwiftNativeFileSystem getFs() {
return fs;
}
/**
* Create a file using the standard {@link #data} bytes.
*
* @param path path to write
* @throws IOException on any problem
*/
protected void createFile(Path path) throws IOException {
createFile(path, data);
}
/**
* Create a file with the given data.
*
* @param path path to write
* @param sourceData source dataset
* @throws IOException on any problem
*/
protected void createFile(Path path, byte[] sourceData) throws IOException {
FSDataOutputStream out = fs.create(path);
out.write(sourceData, 0, sourceData.length);
out.close();
}
/**
* Create and then close a file
* @param path path to create
* @throws IOException on a failure
*/
protected void createEmptyFile(Path path) throws IOException {
FSDataOutputStream out = fs.create(path);
out.close();
}
/**
* Get the inner store -useful for lower level operations
*
* @return the store
*/
protected SwiftNativeFileSystemStore getStore() {
return fs.getStore();
}
/**
* Rename a path
* @param src source
* @param dst dest
* @param renameMustSucceed flag to say "this rename must exist"
* @param srcExists add assert that the source exists afterwards
* @param dstExists add assert the dest exists afterwards
* @throws IOException IO trouble
*/
protected void rename(Path src, Path dst, boolean renameMustSucceed,
boolean srcExists, boolean dstExists) throws IOException {
if (renameMustSucceed) {
renameToSuccess(src, dst, srcExists, dstExists);
} else {
renameToFailure(src, dst);
}
}
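  /*
   * Usage sketch (hypothetical paths, for illustration only):
   *   rename(src, dst, true, false, true);   // rename must succeed: src gone, dst present
   *   rename(src, src, false, true, true);   // rename must fail: everything left in place
   */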
/**
* Get a string describing the outcome of a rename, by listing the dest
* path and its parent along with some covering text
   * @param src source path
* @param dst dest path
* @return a string for logs and exceptions
* @throws IOException IO problems
*/
private String getRenameOutcome(Path src, Path dst) throws IOException {
String lsDst = ls(dst);
Path parent = dst.getParent();
String lsParent = parent != null ? ls(parent) : "";
return " result of " + src + " => " + dst
+ " - " + lsDst
+ " \n" + lsParent;
}
/**
* Rename, expecting an exception to be thrown
*
* @param src source
* @param dst dest
* @throws IOException a failure other than an
* expected SwiftRenameException or FileNotFoundException
*/
protected void renameToFailure(Path src, Path dst) throws IOException {
try {
getStore().rename(src, dst);
fail("Expected failure renaming " + src + " to " + dst
+ "- but got success");
} catch (SwiftOperationFailedException e) {
LOG.debug("Rename failed (expected):" + e);
} catch (FileNotFoundException e) {
LOG.debug("Rename failed (expected):" + e);
}
}
/**
* Rename to success
*
* @param src source
* @param dst dest
* @param srcExists add assert that the source exists afterwards
* @param dstExists add assert the dest exists afterwards
* @throws SwiftOperationFailedException operation failure
* @throws IOException IO problems
*/
protected void renameToSuccess(Path src, Path dst,
boolean srcExists, boolean dstExists)
throws SwiftOperationFailedException, IOException {
getStore().rename(src, dst);
String outcome = getRenameOutcome(src, dst);
assertEquals("Source " + src + "exists: " + outcome,
srcExists, fs.exists(src));
assertEquals("Destination " + dstExists + " exists" + outcome,
dstExists, fs.exists(dst));
}
/**
* List a path in the test FS
* @param path path to list
* @return the contents of the path/dir
* @throws IOException IO problems
*/
protected String ls(Path path) throws IOException {
return SwiftTestUtils.ls(fs, path);
}
/**
* assert that a path exists
* @param message message to use in an assertion
* @param path path to probe
* @throws IOException IO problems
*/
public void assertExists(String message, Path path) throws IOException {
assertPathExists(fs, message, path);
}
/**
* assert that a path does not
* @param message message to use in an assertion
* @param path path to probe
* @throws IOException IO problems
*/
public void assertPathDoesNotExist(String message, Path path) throws
IOException {
SwiftTestUtils.assertPathDoesNotExist(fs, message, path);
}
/**
   * Assert that a file exists and that its {@link FileStatus} entry
   * declares that it is a file and not a symlink or directory.
*
* @param filename name of the file
* @throws IOException IO problems during file operations
*/
protected void assertIsFile(Path filename) throws IOException {
SwiftTestUtils.assertIsFile(fs, filename);
}
  /**
   * Create a directory, asserting that the mkdirs() call succeeded.
   *
   * @param path directory path to create
   * @throws IOException IO problems during file operations
   */
  protected void mkdirs(Path path) throws IOException {
    assertTrue("Failed to mkdir " + path, fs.mkdirs(path));
}
/**
* Assert that a delete succeeded
* @param path path to delete
* @param recursive recursive flag
* @throws IOException IO problems
*/
protected void assertDeleted(Path path, boolean recursive) throws IOException {
SwiftTestUtils.assertDeleted(fs, path, recursive);
}
/**
* Assert that a value is not equal to the expected value
* @param message message if the two values are equal
* @param expected expected value
* @param actual actual value
*/
protected void assertNotEqual(String message, int expected, int actual) {
assertTrue(message,
actual != expected);
}
/**
* Get the number of partitions written from the Swift Native FS APIs
* @param out output stream
* @return the number of partitioned files written by the stream
*/
protected int getPartitionsWritten(FSDataOutputStream out) {
return SwiftNativeFileSystem.getPartitionsWritten(out);
}
/**
* Assert that the no. of partitions written matches expectations
* @param action operation (for use in the assertions)
* @param out output stream
* @param expected expected no. of partitions
*/
protected void assertPartitionsWritten(String action, FSDataOutputStream out,
long expected) {
OutputStream nativeStream = out.getWrappedStream();
int written = getPartitionsWritten(out);
if (written != expected) {
Assert.fail(action + ": " +
TestSwiftFileSystemPartitionedUploads.WRONG_PARTITION_COUNT
+ " expected: " + expected + " actual: " + written
+ " -- " + nativeStream);
}
}
/**
* Assert that the result value == -1, which implies
* that the end of the file was reached
* @param text text to include in a message (usually the operation)
* @param result read result to validate
*/
protected void assertMinusOne(String text, int result) {
assertEquals(text + " wrong read result " + result, -1, result);
}
}
| 12,260 | 29.57606 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/AcceptAllFilter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
/**
* A path filter that accepts everything
*/
public class AcceptAllFilter implements PathFilter {
@Override
public boolean accept(Path file) {
return true;
}
}
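//Usage sketch (hypothetical 'fs' and 'dir'): list everything in a directory
//  FileStatus[] entries = fs.listStatus(dir, new AcceptAllFilter());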
| 1,092 | 33.15625 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestReadPastBuffer.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.http.SwiftProtocolConstants;
import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
import org.apache.hadoop.io.IOUtils;
import org.junit.After;
import org.junit.Test;
/**
* Seek tests verify that
* <ol>
* <li>When you seek on a 0 byte file to byte (0), it's not an error.</li>
* <li>When you seek past the end of a file, it's an error that should
* raise an EOFException.</li>
* <li>when you seek forwards, you get new data</li>
* <li>when you seek backwards, you get the previous data</li>
* <li>That this works for big multi-MB files as well as small ones.</li>
* </ol>
* These may seem "obvious", but the more the input streams try to be clever
* about offsets and buffering, the more likely it is that seek() will start
* to get confused.
*/
public class TestReadPastBuffer extends SwiftFileSystemBaseTest {
protected static final Log LOG =
LogFactory.getLog(TestReadPastBuffer.class);
public static final int SWIFT_READ_BLOCKSIZE = 4096;
public static final int SEEK_FILE_LEN = SWIFT_READ_BLOCKSIZE * 2;
private Path testPath;
private Path readFile;
private Path zeroByteFile;
private FSDataInputStream instream;
/**
* Get a configuration with a small blocksize reported to callers
* @return a configuration for this test
*/
@Override
public Configuration getConf() {
Configuration conf = super.getConf();
/*
* set to 4KB
*/
conf.setInt(SwiftProtocolConstants.SWIFT_BLOCKSIZE, SWIFT_READ_BLOCKSIZE);
return conf;
}
/**
* Setup creates the test directory and the file to read
*
* @throws Exception
*/
@Override
public void setUp() throws Exception {
super.setUp();
byte[] block = SwiftTestUtils.dataset(SEEK_FILE_LEN, 0, 255);
//set up the test directory and data file
testPath = path("/test");
readFile = new Path(testPath, "TestReadPastBuffer.txt");
createFile(readFile, block);
}
@After
public void cleanFile() {
IOUtils.closeStream(instream);
instream = null;
}
/**
* Create a config with a 1KB request size
* @return a config
*/
@Override
protected Configuration createConfiguration() {
Configuration conf = super.createConfiguration();
conf.set(SwiftProtocolConstants.SWIFT_REQUEST_SIZE, "1");
return conf;
}
/**
* Seek past the buffer then read
* @throws Throwable problems
*/
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testSeekAndReadPastEndOfFile() throws Throwable {
instream = fs.open(readFile);
assertEquals(0, instream.getPos());
//a freshly opened stream starts at position 0
//go just before the end
instream.seek(SEEK_FILE_LEN - 2);
assertTrue("Premature EOF", instream.read() != -1);
assertTrue("Premature EOF", instream.read() != -1);
assertMinusOne("read past end of file", instream.read());
}
/**
* Seek past the buffer and attempt a read(buffer)
* @throws Throwable failures
*/
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testSeekBulkReadPastEndOfFile() throws Throwable {
instream = fs.open(readFile);
assertEquals(0, instream.getPos());
//go just before the end
instream.seek(SEEK_FILE_LEN - 1);
byte[] buffer = new byte[1];
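//this first read consumes the final byte of the file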
int result = instream.read(buffer, 0, 1);
//next byte is expected to fail
result = instream.read(buffer, 0, 1);
assertMinusOne("read past end of file", result);
//and this one
result = instream.read(buffer, 0, 1);
assertMinusOne("read past end of file", result);
//now do a 0-byte read: the zero-length range check
//should be applied before any EOF check
result = instream.read(buffer, 0, 0);
assertEquals("EOF checks coming before read range check", 0, result);
}
/**
* Read past the buffer size byte by byte and verify that the buffer
* was refreshed as the read progressed
* @throws Throwable problems
*/
@Test
public void testReadPastBufferSize() throws Throwable {
instream = fs.open(readFile);
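//drain the stream byte-by-byte until it reports EOF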
while (instream.read() != -1);
//here we have gone past the end of a file and its buffer. Now try again
assertMinusOne("reading after the (large) file was read: "+ instream,
instream.read());
}
}
| 5,214 | 30.79878 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestFSMainOperationsSwift.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSMainOperationsBaseTest;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.http.SwiftProtocolConstants;
import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import static org.apache.hadoop.fs.swift.SwiftTestConstants.SWIFT_TEST_TIMEOUT;
import java.io.IOException;
import java.net.URI;
public class TestFSMainOperationsSwift extends FSMainOperationsBaseTest {
@Override
@Before
public void setUp() throws Exception {
Configuration conf = new Configuration();
//small blocksize for faster remote tests
conf.setInt(SwiftProtocolConstants.SWIFT_BLOCKSIZE, 2);
URI serviceURI = SwiftTestUtils.getServiceURI(conf);
fSys = FileSystem.get(serviceURI, conf);
super.setUp();
}
private Path wd = null;
@Override
protected FileSystem createFileSystem() throws Exception {
return fSys;
}
@Override
protected Path getDefaultWorkingDirectory() throws IOException {
if (wd == null) {
wd = fSys.getWorkingDirectory();
}
return wd;
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testWDAbsolute() throws IOException {
Path absoluteDir = getTestRootPath(fSys, "test/existingDir");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testListStatusThrowsExceptionForUnreadableDir() {
SwiftTestUtils.skip("unsupported");
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testFsStatus() throws Exception {
super.testFsStatus();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testWorkingDirectory() throws Exception {
super.testWorkingDirectory();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testMkdirs() throws Exception {
super.testMkdirs();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
super.testMkdirsFailsForSubdirectoryOfExistingFile();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testGetFileStatusThrowsExceptionForNonExistentFile() throws
Exception {
super.testGetFileStatusThrowsExceptionForNonExistentFile();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testListStatusThrowsExceptionForNonExistentFile() throws
Exception {
super.testListStatusThrowsExceptionForNonExistentFile();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testListStatus() throws Exception {
super.testListStatus();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testListStatusFilterWithNoMatches() throws Exception {
super.testListStatusFilterWithNoMatches();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testListStatusFilterWithSomeMatches() throws Exception {
super.testListStatusFilterWithSomeMatches();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testGlobStatusNonExistentFile() throws Exception {
super.testGlobStatusNonExistentFile();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testGlobStatusWithNoMatchesInPath() throws Exception {
super.testGlobStatusWithNoMatchesInPath();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testGlobStatusSomeMatchesInDirectories() throws Exception {
super.testGlobStatusSomeMatchesInDirectories();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testGlobStatusWithMultipleWildCardMatches() throws Exception {
super.testGlobStatusWithMultipleWildCardMatches();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testGlobStatusWithMultipleMatchesOfSingleChar() throws Exception {
super.testGlobStatusWithMultipleMatchesOfSingleChar();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testGlobStatusFilterWithEmptyPathResults() throws Exception {
super.testGlobStatusFilterWithEmptyPathResults();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testGlobStatusFilterWithSomePathMatchesAndTrivialFilter() throws
Exception {
super.testGlobStatusFilterWithSomePathMatchesAndTrivialFilter();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testGlobStatusFilterWithMultipleWildCardMatchesAndTrivialFilter() throws
Exception {
super.testGlobStatusFilterWithMultipleWildCardMatchesAndTrivialFilter();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testGlobStatusFilterWithMultiplePathMatchesAndNonTrivialFilter() throws
Exception {
super.testGlobStatusFilterWithMultiplePathMatchesAndNonTrivialFilter();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testGlobStatusFilterWithNoMatchingPathsAndNonTrivialFilter() throws
Exception {
super.testGlobStatusFilterWithNoMatchingPathsAndNonTrivialFilter();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testGlobStatusFilterWithMultiplePathWildcardsAndNonTrivialFilter() throws
Exception {
super.testGlobStatusFilterWithMultiplePathWildcardsAndNonTrivialFilter();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testWriteReadAndDeleteEmptyFile() throws Exception {
super.testWriteReadAndDeleteEmptyFile();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testWriteReadAndDeleteHalfABlock() throws Exception {
super.testWriteReadAndDeleteHalfABlock();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testWriteReadAndDeleteOneBlock() throws Exception {
super.testWriteReadAndDeleteOneBlock();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testWriteReadAndDeleteOneAndAHalfBlocks() throws Exception {
super.testWriteReadAndDeleteOneAndAHalfBlocks();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testWriteReadAndDeleteTwoBlocks() throws Exception {
super.testWriteReadAndDeleteTwoBlocks();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testOverwrite() throws IOException {
super.testOverwrite();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testWriteInNonExistentDirectory() throws IOException {
super.testWriteInNonExistentDirectory();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testDeleteNonExistentFile() throws IOException {
super.testDeleteNonExistentFile();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testDeleteRecursively() throws IOException {
super.testDeleteRecursively();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testDeleteEmptyDirectory() throws IOException {
super.testDeleteEmptyDirectory();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testRenameNonExistentPath() throws Exception {
super.testRenameNonExistentPath();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testRenameFileToNonExistentDirectory() throws Exception {
super.testRenameFileToNonExistentDirectory();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testRenameFileToDestinationWithParentFile() throws Exception {
super.testRenameFileToDestinationWithParentFile();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testRenameFileToExistingParent() throws Exception {
super.testRenameFileToExistingParent();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testRenameFileToItself() throws Exception {
super.testRenameFileToItself();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testRenameFileAsExistingFile() throws Exception {
super.testRenameFileAsExistingFile();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testRenameFileAsExistingDirectory() throws Exception {
super.testRenameFileAsExistingDirectory();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testRenameDirectoryToItself() throws Exception {
super.testRenameDirectoryToItself();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testRenameDirectoryToNonExistentParent() throws Exception {
super.testRenameDirectoryToNonExistentParent();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testRenameDirectoryAsNonExistentDirectory() throws Exception {
super.testRenameDirectoryAsNonExistentDirectory();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testRenameDirectoryAsEmptyDirectory() throws Exception {
super.testRenameDirectoryAsEmptyDirectory();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testRenameDirectoryAsNonEmptyDirectory() throws Exception {
super.testRenameDirectoryAsNonEmptyDirectory();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testRenameDirectoryAsFile() throws Exception {
super.testRenameDirectoryAsFile();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testInputStreamClosedTwice() throws IOException {
super.testInputStreamClosedTwice();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testOutputStreamClosedTwice() throws IOException {
super.testOutputStreamClosedTwice();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testGetWrappedInputStream() throws IOException {
super.testGetWrappedInputStream();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
@Override
public void testCopyToLocalWithUseRawLocalFileSystemOption() throws
Exception {
super.testCopyToLocalWithUseRawLocalFileSystemOption();
}
}
| 11,285 | 29.752044 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/scale/TestWriteManySmallFiles.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.scale;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.util.Duration;
import org.apache.hadoop.fs.swift.util.DurationStats;
import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
import org.junit.Test;
public class TestWriteManySmallFiles extends SwiftScaleTestBase {
public static final Log LOG = LogFactory.getLog(TestWriteManySmallFiles.class);
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testScaledWriteThenRead() throws Throwable {
Path dir = new Path("/test/manysmallfiles");
Duration rm1 = new Duration();
fs.delete(dir, true);
rm1.finished();
fs.mkdirs(dir);
Duration ls1 = new Duration();
fs.listStatus(dir);
ls1.finished();
long count = getOperationCount();
SwiftTestUtils.noteAction("Beginning Write of "+ count + " files ");
DurationStats writeStats = new DurationStats("write");
DurationStats readStats = new DurationStats("read");
String format = "%08d";
for (long l = 0; l < count; l++) {
String name = String.format(format, l);
Path p = new Path(dir, "part-" + name);
Duration d = new Duration();
SwiftTestUtils.writeTextFile(fs, p, name, false);
d.finished();
writeStats.add(d);
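//pause between uploads (assumption: throttles the request rate on the store)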
Thread.sleep(1000);
}
//at this point, the directory is fully populated.
SwiftTestUtils.noteAction("Beginning ls");
Duration ls2 = new Duration();
FileStatus[] status2 = fs.listStatus(dir);
ls2.finished();
assertEquals("Not enough entries in the directory", count, status2.length);
SwiftTestUtils.noteAction("Beginning read");
for (long l = 0; l < count; l++) {
String name = String.format(format, l);
Path p = new Path(dir, "part-" + name);
Duration d = new Duration();
String result = SwiftTestUtils.readBytesToString(fs, p, name.length());
assertEquals(name, result);
d.finished();
readStats.add(d);
}
//do a recursive delete
SwiftTestUtils.noteAction("Beginning delete");
Duration rm2 = new Duration();
fs.delete(dir, true);
rm2.finished();
//print the stats
LOG.info(String.format("'filesystem','%s'",fs.getUri()));
LOG.info(writeStats.toString());
LOG.info(readStats.toString());
LOG.info(String.format(
"'rm1',%d,'ls1',%d",
rm1.value(),
ls1.value()));
LOG.info(String.format(
"'rm2',%d,'ls2',%d",
rm2.value(),
ls2.value()));
}
}
| 3,424 | 34.309278 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/scale/SwiftScaleTestBase.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.scale;
import org.apache.hadoop.fs.swift.SwiftFileSystemBaseTest;
/**
* Base class for scale tests; here is where the common scale configuration
* keys are defined
*/
public class SwiftScaleTestBase extends SwiftFileSystemBaseTest {
public static final String SCALE_TEST = "scale.test.";
public static final String KEY_OPERATION_COUNT = SCALE_TEST + "operation.count";
public static final long DEFAULT_OPERATION_COUNT = 10;
protected long getOperationCount() {
return getConf().getLong(KEY_OPERATION_COUNT, DEFAULT_OPERATION_COUNT);
}
}
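//Configuration sketch (hypothetical value): scale up a run by setting
//  conf.setLong(SwiftScaleTestBase.KEY_OPERATION_COUNT, 100);
//on the configuration used to create the filesystem under test.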
| 1,414 | 36.236842 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/hdfs2/TestV2LsOperations.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.hdfs2;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.swift.SwiftFileSystemBaseTest;
import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
import org.junit.Test;
import java.io.IOException;
public class TestV2LsOperations extends SwiftFileSystemBaseTest {
private Path[] testDirs;
/**
* Setup deletes and recreates the base test directory
* @throws Exception
*/
@Override
public void setUp() throws Exception {
super.setUp();
//delete the test directory
Path test = path("/test");
fs.delete(test, true);
mkdirs(test);
}
/**
* Create subdirectories and files under test/ for those tests
* that want them. Doing so adds overhead to setup and teardown,
* so should only be done for those tests that need them.
* @throws IOException on an IO problem
*/
private void createTestSubdirs() throws IOException {
testDirs = new Path[]{
path("/test/hadoop/a"),
path("/test/hadoop/b"),
path("/test/hadoop/c/1"),
};
assertPathDoesNotExist("test directory setup", testDirs[0]);
for (Path path : testDirs) {
mkdirs(path);
}
}
/**
* To get this project to compile under Hadoop 1, this method needs to be
* commented out, as it uses the listFiles API introduced in Hadoop 2.
*
* @param fs filesystem
* @param dir dir
* @param subdir subdir
* @param recursive recurse?
* @throws IOException IO problems
*/
public static void assertListFilesFinds(FileSystem fs,
Path dir,
Path subdir,
boolean recursive) throws IOException {
RemoteIterator<LocatedFileStatus> iterator =
fs.listFiles(dir, recursive);
boolean found = false;
int entries = 0;
StringBuilder builder = new StringBuilder();
while (iterator.hasNext()) {
LocatedFileStatus next = iterator.next();
entries++;
builder.append(next.toString()).append('\n');
if (next.getPath().equals(subdir)) {
found = true;
}
}
assertTrue("Path " + subdir
+ " not found in directory " + dir + " : "
+ " entries=" + entries
+ " content"
+ builder.toString(),
found);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testListFilesRootDir() throws Throwable {
Path dir = path("/");
Path child = new Path(dir, "test");
fs.delete(child, true);
SwiftTestUtils.writeTextFile(fs, child, "text", false);
assertListFilesFinds(fs, dir, child, false);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testListFilesSubDir() throws Throwable {
createTestSubdirs();
Path dir = path("/test/subdir");
Path child = new Path(dir, "text.txt");
SwiftTestUtils.writeTextFile(fs, child, "text", false);
assertListFilesFinds(fs, dir, child, false);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testListFilesRecursive() throws Throwable {
createTestSubdirs();
Path dir = path("/test/recursive");
Path child = new Path(dir, "hadoop/a/a.txt");
SwiftTestUtils.writeTextFile(fs, child, "text", false);
assertListFilesFinds(fs, dir, child, true);
}
}
| 4,184 | 31.192308 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/hdfs2/TestSwiftFileSystemDirectoriesHdfs2.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.hdfs2;
import org.apache.hadoop.fs.swift.TestSwiftFileSystemDirectories;
import org.apache.hadoop.fs.swift.snative.SwiftFileStatus;
/**
* Add some HDFS-2 only assertions to {@link TestSwiftFileSystemDirectories}
*/
public class TestSwiftFileSystemDirectoriesHdfs2 extends
TestSwiftFileSystemDirectories {
/**
* make assertions about fields that only appear in
* FileStatus in HDFS2
* @param stat status to look at
*/
protected void extraStatusAssertions(SwiftFileStatus stat) {
//HDFS2
assertTrue("isDirectory(): Not a directory: " + stat, stat.isDirectory());
assertFalse("isFile(): declares itself a file: " + stat, stat.isFile());
assertFalse("isFile(): declares itself a file: " + stat, stat.isSymlink());
}
}
| 1,659 | 36.727273 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/contract/TestSwiftContractDelete.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.contract;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestSwiftContractDelete extends AbstractContractDeleteTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new SwiftContract(conf);
}
}
| 1,231 | 37.5 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/contract/TestSwiftContractOpen.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.contract;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.ContractTestUtils;
public class TestSwiftContractOpen extends AbstractContractOpenTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new SwiftContract(conf);
}
@Override
public void testOpenReadDir() throws Throwable {
ContractTestUtils.skip("Skipping object-store quirk");
}
@Override
public void testOpenReadDirWithChild() throws Throwable {
ContractTestUtils.skip("Skipping object-store quirk");
}
}
| 1,544 | 34.930233 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/contract/SwiftContract.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.contract;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
import org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem;
/**
* The contract of OpenStack Swift: only enabled if the test binding data is provided
*/
public class SwiftContract extends AbstractBondedFSContract {
public static final String CONTRACT_XML = "contract/swift.xml";
public SwiftContract(Configuration conf) {
super(conf);
//insert the base features
addConfResource(CONTRACT_XML);
}
@Override
public String getScheme() {
return SwiftNativeFileSystem.SWIFT;
}
}
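//Usage sketch (assumes swift test credentials are present in the configuration):
//  AbstractFSContract contract = new SwiftContract(new Configuration());
//  contract.init();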
| 1,487 | 32.066667 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/contract/TestSwiftContractCreate.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.contract;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.ContractTestUtils;
public class TestSwiftContractCreate extends AbstractContractCreateTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new SwiftContract(conf);
}
@Override
public void testOverwriteEmptyDirectory() throws Throwable {
ContractTestUtils.skip("blobstores can't distinguish empty directories from files");
}
}
| 1,456 | 37.342105 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/contract/TestSwiftContractSeek.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.contract;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestSwiftContractSeek extends AbstractContractSeekTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new SwiftContract(conf);
}
}
| 1,225 | 37.3125 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/contract/TestSwiftContractRootDir.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.contract;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
/**
* root dir operations against a Swift container
*/
public class TestSwiftContractRootDir extends
AbstractContractRootDirectoryTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new SwiftContract(conf);
}
}
| 1,302 | 35.194444 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/contract/TestSwiftContractMkdir.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.contract;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
/**
* Test dir operations on Swift
*/
public class TestSwiftContractMkdir extends AbstractContractMkdirTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new SwiftContract(conf);
}
}
| 1,265 | 35.171429 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/contract/TestSwiftContractRename.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.contract;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestSwiftContractRename extends AbstractContractRenameTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new SwiftContract(conf);
}
}
| 1,232 | 36.363636 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/http/TestSwiftRestClient.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.http;
import org.apache.commons.httpclient.Header;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.SwiftTestConstants;
import org.apache.hadoop.fs.swift.util.Duration;
import org.apache.hadoop.fs.swift.util.DurationStats;
import org.apache.hadoop.fs.swift.util.SwiftObjectPath;
import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import java.io.ByteArrayInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
public class TestSwiftRestClient implements SwiftTestConstants {
private static final Log LOG =
LogFactory.getLog(TestSwiftRestClient.class);
private Configuration conf;
private boolean runTests;
private URI serviceURI;
@Before
public void setup() throws IOException {
conf = new Configuration();
runTests = SwiftTestUtils.hasServiceURI(conf);
if (runTests) {
serviceURI = SwiftTestUtils.getServiceURI(conf);
}
}
protected void assumeEnabled() {
Assume.assumeTrue(runTests);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testCreate() throws Throwable {
assumeEnabled();
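//instantiating the client is itself the test: broken bindings should fail here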
SwiftRestClient client = createClient();
}
private SwiftRestClient createClient() throws IOException {
return SwiftRestClient.getInstance(serviceURI, conf);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testAuthenticate() throws Throwable {
assumeEnabled();
SwiftRestClient client = createClient();
client.authenticate();
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testPutAndDelete() throws Throwable {
assumeEnabled();
SwiftRestClient client = createClient();
client.authenticate();
Path path = new Path("restTestPutAndDelete");
SwiftObjectPath sobject = SwiftObjectPath.fromPath(serviceURI, path);
byte[] stuff = new byte[1];
stuff[0] = 'a';
client.upload(sobject, new ByteArrayInputStream(stuff), stuff.length);
//check file exists
Duration head = new Duration();
Header[] responseHeaders = client.headRequest("expect success",
sobject,
SwiftRestClient.NEWEST);
head.finished();
LOG.info("head request duration " + head);
for (Header header: responseHeaders) {
LOG.info(header.toString());
}
//delete the file
client.delete(sobject);
//check file is gone
try {
Header[] headers = client.headRequest("expect fail",
sobject,
SwiftRestClient.NEWEST);
Assert.fail("Expected deleted file, but object is still present: "
+ sobject);
} catch (FileNotFoundException e) {
//expected
}
for (DurationStats stats: client.getOperationStatistics()) {
LOG.info(stats);
}
}
}
| 3,948 | 32.466102 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/http/TestRestClientBindings.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.http;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.swift.SwiftTestConstants;
import org.apache.hadoop.fs.swift.exceptions.SwiftConfigurationException;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Properties;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.DOT_AUTH_URL;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.DOT_PASSWORD;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.DOT_USERNAME;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.SWIFT_AUTH_PROPERTY;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.SWIFT_CONTAINER_PROPERTY;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.SWIFT_HTTPS_PORT_PROPERTY;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.SWIFT_HTTP_PORT_PROPERTY;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.SWIFT_PASSWORD_PROPERTY;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.SWIFT_REGION_PROPERTY;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.SWIFT_SERVICE_PROPERTY;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.SWIFT_TENANT_PROPERTY;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.SWIFT_USERNAME_PROPERTY;
import static org.apache.hadoop.fs.swift.util.SwiftTestUtils.assertPropertyEquals;
public class TestRestClientBindings extends Assert
implements SwiftTestConstants {
private static final String SERVICE = "sname";
private static final String CONTAINER = "cname";
private static final String FS_URI = "swift://"
+ CONTAINER + "." + SERVICE + "/";
private static final String AUTH_URL = "http://localhost:8080/auth";
private static final String USER = "user";
private static final String PASS = "pass";
private static final String TENANT = "tenant";
private URI filesysURI;
private Configuration conf;
@Before
public void setup() throws URISyntaxException {
filesysURI = new URI(FS_URI);
conf = new Configuration(true);
setInstanceVal(conf, SERVICE, DOT_AUTH_URL, AUTH_URL);
setInstanceVal(conf, SERVICE, DOT_USERNAME, USER);
setInstanceVal(conf, SERVICE, DOT_PASSWORD, PASS);
}
private void setInstanceVal(Configuration conf,
String host,
String key,
String val) {
String instance = RestClientBindings.buildSwiftInstancePrefix(host);
String confkey = instance
+ key;
conf.set(confkey, val);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testPrefixBuilder() throws Throwable {
String built = RestClientBindings.buildSwiftInstancePrefix(SERVICE);
assertEquals("fs.swift.service." + SERVICE, built);
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testBindAgainstConf() throws Exception {
Properties props = RestClientBindings.bind(filesysURI, conf);
assertPropertyEquals(props, SWIFT_CONTAINER_PROPERTY, CONTAINER);
assertPropertyEquals(props, SWIFT_SERVICE_PROPERTY, SERVICE);
assertPropertyEquals(props, SWIFT_AUTH_PROPERTY, AUTH_URL);
assertPropertyEquals(props, SWIFT_AUTH_PROPERTY, AUTH_URL);
assertPropertyEquals(props, SWIFT_USERNAME_PROPERTY, USER);
assertPropertyEquals(props, SWIFT_PASSWORD_PROPERTY, PASS);
assertPropertyEquals(props, SWIFT_TENANT_PROPERTY, null);
assertPropertyEquals(props, SWIFT_REGION_PROPERTY, null);
assertPropertyEquals(props, SWIFT_HTTP_PORT_PROPERTY, null);
assertPropertyEquals(props, SWIFT_HTTPS_PORT_PROPERTY, null);
}
public void expectBindingFailure(URI fsURI, Configuration config) {
try {
Properties binding = RestClientBindings.bind(fsURI, config);
//if we get here, binding didn't fail - something else is wrong.
//list the properties but not the values.
StringBuilder details = new StringBuilder();
for (Object key: binding.keySet()) {
details.append(key.toString()).append(" ");
}
fail("Expected a failure, got the binding [ "+ details+"]");
} catch (SwiftConfigurationException expected) {
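//expected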
}
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testBindAgainstConfMissingInstance() throws Exception {
Configuration badConf = new Configuration();
expectBindingFailure(filesysURI, badConf);
}
/* Hadoop 2.x+ only, as conf.unset() isn't a v1 feature
public void testBindAgainstConfIncompleteInstance() throws Exception {
String instance = RestClientBindings.buildSwiftInstancePrefix(SERVICE);
conf.unset(instance + DOT_PASSWORD);
expectBindingFailure(filesysURI, conf);
}
*/
@Test(expected = SwiftConfigurationException.class)
public void testDottedServiceURL() throws Exception {
RestClientBindings.bind(new URI("swift://hadoop.apache.org/"), conf);
}
@Test(expected = SwiftConfigurationException.class)
public void testMissingServiceURL() throws Exception {
RestClientBindings.bind(new URI("swift:///"), conf);
}
/**
* inner test method that expects container extraction to fail;
* if it does not, it fails the test with a meaningful error message.
*
* @param hostname hostname to parse
*/
private static void expectExtractContainerFail(String hostname) {
try {
String container = RestClientBindings.extractContainerName(hostname);
fail("Expected an error -got a container of '" + container
+ "' from " + hostname);
} catch (SwiftConfigurationException expected) {
//expected
}
}
/**
* inner test method that expects service extraction to fail;
* if it does not, it fails the test with a meaningful error message.
*
* @param hostname hostname to parse
*/
public static void expectExtractServiceFail(String hostname) {
try {
String service = RestClientBindings.extractServiceName(hostname);
fail("Expected an error -got a service of '" + service
+ "' from " + hostname);
} catch (SwiftConfigurationException expected) {
//expected
}
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testEmptyHostname() throws Throwable {
expectExtractContainerFail("");
expectExtractServiceFail("");
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testDot() throws Throwable {
expectExtractContainerFail(".");
expectExtractServiceFail(".");
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testSimple() throws Throwable {
expectExtractContainerFail("simple");
expectExtractServiceFail("simple");
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testTrailingDot() throws Throwable {
expectExtractServiceFail("simple.");
}
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testLeadingDot() throws Throwable {
expectExtractServiceFail(".leading");
}
}
| 7,671 | 37.552764 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/ApiKeyAuthenticationRequest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.auth;
import org.codehaus.jackson.annotate.JsonProperty;
/**
* Class that represents authentication request to Openstack Keystone.
* Contains basic authentication information.
* THIS FILE IS MAPPED BY JACKSON TO AND FROM JSON.
* DO NOT RENAME OR MODIFY FIELDS AND THEIR ACCESSORS
*/
public class ApiKeyAuthenticationRequest extends AuthenticationRequest {
/**
* Credentials for login
*/
private ApiKeyCredentials apiKeyCredentials;
/**
* API key auth
* @param tenantName tenant
* @param apiKeyCredentials credentials
*/
public ApiKeyAuthenticationRequest(String tenantName, ApiKeyCredentials apiKeyCredentials) {
this.tenantName = tenantName;
this.apiKeyCredentials = apiKeyCredentials;
}
/**
* @return credentials for login into Keystone
*/
@JsonProperty("RAX-KSKEY:apiKeyCredentials")
public ApiKeyCredentials getApiKeyCredentials() {
return apiKeyCredentials;
}
/**
* @param apiKeyCredentials credentials for login into Keystone
*/
public void setApiKeyCredentials(ApiKeyCredentials apiKeyCredentials) {
this.apiKeyCredentials = apiKeyCredentials;
}
@Override
public String toString() {
return "Auth as " +
"tenant '" + tenantName + "' "
+ apiKeyCredentials;
}
}
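//JSON shape sketch (inferred from the Jackson annotations; field contents vary):
//  {"tenantName": "...", "RAX-KSKEY:apiKeyCredentials": {...}}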
| 2,134 | 30.865672 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/KeystoneApiKeyCredentials.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.auth;
/**
* Class for Keystone authentication.
* Used when {@link ApiKeyCredentials} is not applicable
* THIS FILE IS MAPPED BY JACKSON TO AND FROM JSON.
* DO NOT RENAME OR MODIFY FIELDS AND THEIR ACCESSORS.
*/
public class KeystoneApiKeyCredentials {
/**
* User access key
*/
private String accessKey;
/**
* User access secret
*/
private String secretKey;
public KeystoneApiKeyCredentials(String accessKey, String secretKey) {
this.accessKey = accessKey;
this.secretKey = secretKey;
}
public String getAccessKey() {
return accessKey;
}
public void setAccessKey(String accessKey) {
this.accessKey = accessKey;
}
public String getSecretKey() {
return secretKey;
}
public void setSecretKey(String secretKey) {
this.secretKey = secretKey;
}
@Override
public String toString() {
return "user " +
"'" + accessKey + '\'' +
" with key of length " + ((secretKey == null) ? 0 : secretKey.length());
}
}
| 1,861 | 26.791045 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/AuthenticationRequestWrapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.auth;
/**
* This class is used for correct hierarchy mapping of
* Keystone authentication model and java code.
* THIS FILE IS MAPPED BY JACKSON TO AND FROM JSON.
* DO NOT RENAME OR MODIFY FIELDS AND THEIR ACCESSORS.
*/
public class AuthenticationRequestWrapper {
/**
* authentication request
*/
private AuthenticationRequest auth;
/**
* default constructor used for json parsing
*/
public AuthenticationRequestWrapper() {
}
/**
* @param auth authentication requests
*/
public AuthenticationRequestWrapper(AuthenticationRequest auth) {
this.auth = auth;
}
/**
* @return authentication request
*/
public AuthenticationRequest getAuth() {
return auth;
}
/**
* @param auth authentication request
*/
public void setAuth(AuthenticationRequest auth) {
this.auth = auth;
}
}
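//JSON shape sketch (inferred from the field name; the nested request varies):
//  {"auth": { ... }}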
| 1,690 | 27.183333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/PasswordCredentials.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.auth;
/**
* Describes credentials to log in Swift using Keystone authentication.
* THIS FILE IS MAPPED BY JACKSON TO AND FROM JSON.
* DO NOT RENAME OR MODIFY FIELDS AND THEIR ACCESSORS.
*/
public class PasswordCredentials {
/**
* user login
*/
private String username;
/**
* user password
*/
private String password;
/**
* default constructor
*/
public PasswordCredentials() {
}
/**
* @param username user login
* @param password user password
*/
public PasswordCredentials(String username, String password) {
this.username = username;
this.password = password;
}
/**
* @return user password
*/
public String getPassword() {
return password;
}
/**
* @param password user password
*/
public void setPassword(String password) {
this.password = password;
}
/**
* @return login
*/
public String getUsername() {
return username;
}
/**
* @param username login
*/
public void setUsername(String username) {
this.username = username;
}
@Override
public String toString() {
return "user '" + username + '\'' +
" with password of length " + ((password == null) ? 0 : password.length());
}
}
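/*
 * Illustrative only (values are made up): Jackson maps this bean by its
 * accessors, so an instance serializes to the Keystone "passwordCredentials"
 * element, e.g.
 *
 *   new PasswordCredentials("demo", "secret")
 *   // -> {"username":"demo","password":"secret"}
 */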
| 2,080 | 22.647727 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/KeyStoneAuthRequest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.auth;
/**
* Class that represents authentication to OpenStack Keystone.
* Contains basic authentication information.
* Used when {@link ApiKeyAuthenticationRequest} is not applicable
* (e.g. because a Keystone installation/version/modification differs).
* THIS FILE IS MAPPED BY JACKSON TO AND FROM JSON.
* DO NOT RENAME OR MODIFY FIELDS AND THEIR ACCESSORS.
*/
public class KeyStoneAuthRequest extends AuthenticationRequest {
/**
* Credentials for Keystone authentication
*/
private KeystoneApiKeyCredentials apiAccessKeyCredentials;
/**
* @param tenant Keystone tenant name for authentication
* @param apiAccessKeyCredentials Credentials for authentication
*/
public KeyStoneAuthRequest(String tenant, KeystoneApiKeyCredentials apiAccessKeyCredentials) {
this.apiAccessKeyCredentials = apiAccessKeyCredentials;
this.tenantName = tenant;
}
public KeystoneApiKeyCredentials getApiAccessKeyCredentials() {
return apiAccessKeyCredentials;
}
public void setApiAccessKeyCredentials(KeystoneApiKeyCredentials apiAccessKeyCredentials) {
this.apiAccessKeyCredentials = apiAccessKeyCredentials;
}
@Override
public String toString() {
return "KeyStoneAuthRequest as " +
"tenant '" + tenantName + "' "
+ apiAccessKeyCredentials;
}
}
| 2,194 | 35.583333 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/AuthenticationRequest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.auth;
/**
* Class that represents authentication request to Openstack Keystone.
* Contains basic authentication information.
* THIS FILE IS MAPPED BY JACKSON TO AND FROM JSON.
* DO NOT RENAME OR MODIFY FIELDS AND THEIR ACCESSORS.
*/
public class AuthenticationRequest {
/**
* tenant name
*/
protected String tenantName;
public AuthenticationRequest() {
}
/**
* @return tenant name for Keystone authorization
*/
public String getTenantName() {
return tenantName;
}
/**
* @param tenantName tenant name for authorization
*/
public void setTenantName(String tenantName) {
this.tenantName = tenantName;
}
@Override
public String toString() {
return "AuthenticationRequest{" +
"tenantName='" + tenantName + '\'' +
'}';
}
}
| 1,648 | 27.431034 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/Roles.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.auth;
/**
* Describes user roles in Openstack system.
* THIS FILE IS MAPPED BY JACKSON TO AND FROM JSON.
* DO NOT RENAME OR MODIFY FIELDS AND THEIR ACCESSORS.
*/
public class Roles {
/**
* role name
*/
private String name;
/**
* This field is used in the RackSpace auth model
*/
private String id;
/**
* This field is used in the RackSpace auth model
*/
private String description;
/**
* Service id used in HP public Cloud
*/
private String serviceId;
/**
* Tenant id used in HP public Cloud
*/
private String tenantId;
/**
* @return role name
*/
public String getName() {
return name;
}
/**
* @param name role name
*/
public void setName(String name) {
this.name = name;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public String getServiceId() {
return serviceId;
}
public void setServiceId(String serviceId) {
this.serviceId = serviceId;
}
public String getTenantId() {
return tenantId;
}
public void setTenantId(String tenantId) {
this.tenantId = tenantId;
}
}
| 2,139 | 20.836735 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/ApiKeyCredentials.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.auth;
/**
* Describes the credentials used to log in to Swift via Keystone authentication.
* THIS FILE IS MAPPED BY JACKSON TO AND FROM JSON.
* DO NOT RENAME OR MODIFY FIELDS AND THEIR ACCESSORS.
*/
public class ApiKeyCredentials {
/**
* user login
*/
private String username;
/**
* user api key
*/
private String apikey;
/**
* default constructor
*/
public ApiKeyCredentials() {
}
/**
* @param username user login
* @param apikey user api key
*/
public ApiKeyCredentials(String username, String apikey) {
this.username = username;
this.apikey = apikey;
}
/**
* @return user api key
*/
public String getApiKey() {
return apikey;
}
/**
* @param apikey user api key
*/
public void setApiKey(String apikey) {
this.apikey = apikey;
}
/**
* @return login
*/
public String getUsername() {
return username;
}
/**
* @param username login
*/
public void setUsername(String username) {
this.username = username;
}
@Override
public String toString() {
return "user " +
"'" + username + '\'' +
" with key of length " + ((apikey == null) ? 0 : apikey.length());
}
}
| 2,064 | 22.465909 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/AuthenticationWrapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.auth;
/**
* Wrapper used to map the Keystone authentication response JSON hierarchy
* onto the Java object model.
* THIS FILE IS MAPPED BY JACKSON TO AND FROM JSON.
* DO NOT RENAME OR MODIFY FIELDS AND THEIR ACCESSORS.
*/
public class AuthenticationWrapper {
/**
* authentication response field
*/
private AuthenticationResponse access;
/**
* @return authentication response
*/
public AuthenticationResponse getAccess() {
return access;
}
/**
* @param access sets authentication response
*/
public void setAccess(AuthenticationResponse access) {
this.access = access;
}
}
| 1,462 | 29.479167 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/PasswordAuthenticationRequest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.auth;
/**
* Class that represents authentication request to Openstack Keystone.
* Contains basic authentication information.
* THIS FILE IS MAPPED BY JACKSON TO AND FROM JSON.
* DO NOT RENAME OR MODIFY FIELDS AND THEIR ACCESSORS.
*/
public class PasswordAuthenticationRequest extends AuthenticationRequest {
/**
* Credentials for login
*/
private PasswordCredentials passwordCredentials;
/**
* @param tenantName tenant
* @param passwordCredentials password credentials
*/
public PasswordAuthenticationRequest(String tenantName, PasswordCredentials passwordCredentials) {
this.tenantName = tenantName;
this.passwordCredentials = passwordCredentials;
}
/**
* @return credentials for login into Keystone
*/
public PasswordCredentials getPasswordCredentials() {
return passwordCredentials;
}
/**
* @param passwordCredentials credentials for login into Keystone
*/
public void setPasswordCredentials(PasswordCredentials passwordCredentials) {
this.passwordCredentials = passwordCredentials;
}
@Override
public String toString() {
return "Authenticate as " +
"tenant '" + tenantName + "' "
+ passwordCredentials;
}
}
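/*
 * Request-body sketch (an assumption based on the Keystone v2.0 auth API,
 * not taken from this source). Wrapping an instance in
 * AuthenticationRequestWrapper and serializing it yields a body like:
 *
 *   {"auth": {
 *      "tenantName": "demo-tenant",
 *      "passwordCredentials": {"username": "demo", "password": "secret"}
 *   }}
 */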
| 2,073 | 31.920635 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/AuthenticationResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.auth;
import org.apache.hadoop.fs.swift.auth.entities.AccessToken;
import org.apache.hadoop.fs.swift.auth.entities.Catalog;
import org.apache.hadoop.fs.swift.auth.entities.User;
import java.util.List;
/**
* Response from KeyStone deserialized into AuthenticationResponse class.
* THIS FILE IS MAPPED BY JACKSON TO AND FROM JSON.
* DO NOT RENAME OR MODIFY FIELDS AND THEIR ACCESSORS.
*/
public class AuthenticationResponse {
private Object metadata;
private List<Catalog> serviceCatalog;
private User user;
private AccessToken token;
public Object getMetadata() {
return metadata;
}
public void setMetadata(Object metadata) {
this.metadata = metadata;
}
public List<Catalog> getServiceCatalog() {
return serviceCatalog;
}
public void setServiceCatalog(List<Catalog> serviceCatalog) {
this.serviceCatalog = serviceCatalog;
}
public User getUser() {
return user;
}
public void setUser(User user) {
this.user = user;
}
public AccessToken getToken() {
return token;
}
public void setToken(AccessToken token) {
this.token = token;
}
}
| 1,959 | 27 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/Endpoint.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.auth.entities;
import org.codehaus.jackson.annotate.JsonIgnoreProperties;
import java.net.URI;
/**
* Openstack Swift endpoint description.
* THIS FILE IS MAPPED BY JACKSON TO AND FROM JSON.
* DO NOT RENAME OR MODIFY FIELDS AND THEIR ACCESSORS.
*/
@JsonIgnoreProperties(ignoreUnknown = true)
public class Endpoint {
/**
* endpoint id
*/
private String id;
/**
* Keystone admin URL
*/
private URI adminURL;
/**
* Keystone internal URL
*/
private URI internalURL;
/**
* publicly accessible URL
*/
private URI publicURL;
/**
* second publicly accessible URL
*/
private URI publicURL2;
/**
* Openstack region name
*/
private String region;
/**
* This field is used in RackSpace authentication model
*/
private String tenantId;
/**
* This field is used in the RackSpace auth model
*/
private String versionId;
/**
* This field is used in the RackSpace auth model
*/
private String versionInfo;
/**
* This field is used in the RackSpace auth model
*/
private String versionList;
/**
* @return endpoint id
*/
public String getId() {
return id;
}
/**
* @param id endpoint id
*/
public void setId(String id) {
this.id = id;
}
/**
* @return Keystone admin URL
*/
public URI getAdminURL() {
return adminURL;
}
/**
* @param adminURL Keystone admin URL
*/
public void setAdminURL(URI adminURL) {
this.adminURL = adminURL;
}
/**
* @return Keystone internal URL
*/
public URI getInternalURL() {
return internalURL;
}
/**
* @param internalURL Keystone internal URL
*/
public void setInternalURL(URI internalURL) {
this.internalURL = internalURL;
}
/**
* @return public accessible URL
*/
public URI getPublicURL() {
return publicURL;
}
/**
* @param publicURL public URL
*/
public void setPublicURL(URI publicURL) {
this.publicURL = publicURL;
}
public URI getPublicURL2() {
return publicURL2;
}
public void setPublicURL2(URI publicURL2) {
this.publicURL2 = publicURL2;
}
/**
* @return Openstack region name
*/
public String getRegion() {
return region;
}
/**
* @param region Openstack region name
*/
public void setRegion(String region) {
this.region = region;
}
public String getTenantId() {
return tenantId;
}
public void setTenantId(String tenantId) {
this.tenantId = tenantId;
}
public String getVersionId() {
return versionId;
}
public void setVersionId(String versionId) {
this.versionId = versionId;
}
public String getVersionInfo() {
return versionInfo;
}
public void setVersionInfo(String versionInfo) {
this.versionInfo = versionInfo;
}
public String getVersionList() {
return versionList;
}
public void setVersionList(String versionList) {
this.versionList = versionList;
}
}
| 3,757 | 18.271795 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/Tenant.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.auth.entities;
import org.codehaus.jackson.annotate.JsonIgnoreProperties;
/**
* Tenant is an abstraction in Openstack which describes all account
* information and user privileges in the system.
* THIS FILE IS MAPPED BY JACKSON TO AND FROM JSON.
* DO NOT RENAME OR MODIFY FIELDS AND THEIR ACCESSORS.
*/
@JsonIgnoreProperties(ignoreUnknown = true)
public class Tenant {
/**
* tenant id
*/
private String id;
/**
* tenant short description which Keystone returns
*/
private String description;
/**
* whether the user account is enabled
*/
private boolean enabled;
/**
* tenant human readable name
*/
private String name;
/**
* @return tenant name
*/
public String getName() {
return name;
}
/**
* @param name tenant name
*/
public void setName(String name) {
this.name = name;
}
/**
* @return true if account enabled and false otherwise
*/
public boolean isEnabled() {
return enabled;
}
/**
* @param enabled enable or disable
*/
public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
/**
* @return account short description
*/
public String getDescription() {
return description;
}
/**
* @param description set account description
*/
public void setDescription(String description) {
this.description = description;
}
/**
* @return tenant id
*/
public String getId() {
return id;
}
/**
* @param id tenant id
*/
public void setId(String id) {
this.id = id;
}
}
| 2,396 | 21.194444 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/Catalog.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.auth.entities;
import org.codehaus.jackson.annotate.JsonIgnoreProperties;
import java.util.List;
/**
* Describes Openstack Swift REST endpoints.
* THIS FILE IS MAPPED BY JACKSON TO AND FROM JSON.
* DO NOT RENAME OR MODIFY FIELDS AND THEIR ACCESSORS.
*/
@JsonIgnoreProperties(ignoreUnknown = true)
public class Catalog {
/**
* List of valid swift endpoints
*/
private List<Endpoint> endpoints;
/**
* endpoint links hold additional descriptive information
* which isn't used within the scope of the Hadoop/Swift integration
*/
private List<Object> endpoints_links;
/**
* Openstack REST service name. In our case name = "keystone"
*/
private String name;
/**
* Type of REST service. In our case type = "identity"
*/
private String type;
/**
* @return List of endpoints
*/
public List<Endpoint> getEndpoints() {
return endpoints;
}
/**
* @param endpoints list of endpoints
*/
public void setEndpoints(List<Endpoint> endpoints) {
this.endpoints = endpoints;
}
/**
* @return list of endpoint links
*/
public List<Object> getEndpoints_links() {
return endpoints_links;
}
/**
* @param endpoints_links list of endpoint links
*/
public void setEndpoints_links(List<Object> endpoints_links) {
this.endpoints_links = endpoints_links;
}
/**
* @return name of Openstack REST service
*/
public String getName() {
return name;
}
/**
* @param name name of Openstack REST service
*/
public void setName(String name) {
this.name = name;
}
/**
* @return type of Openstack REST service
*/
public String getType() {
return type;
}
/**
* @param type type of REST service
*/
public void setType(String type) {
this.type = type;
}
}
| 2,618 | 23.25 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/AccessToken.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.auth.entities;
import org.codehaus.jackson.annotate.JsonIgnoreProperties;
/**
* Access token representation of Openstack Keystone authentication.
* Class holds token id, tenant and expiration time.
* THIS FILE IS MAPPED BY JACKSON TO AND FROM JSON.
* DO NOT RENAME OR MODIFY FIELDS AND THEIR ACCESSORS.
*
* Example:
* <pre>
* "token" : {
* "RAX-AUTH:authenticatedBy" : [ "APIKEY" ],
* "expires" : "2013-07-12T05:19:24.685-05:00",
* "id" : "8bbea4215113abdab9d4c8fb0d37",
* "tenant" : { "id" : "01011970",
* "name" : "77777"
* }
* }
* </pre>
*/
@JsonIgnoreProperties(ignoreUnknown = true)
public class AccessToken {
/**
* token expiration time
*/
private String expires;
/**
* token id
*/
private String id;
/**
* tenant to which this token is attached
*/
private Tenant tenant;
/**
* @return token expiration time
*/
public String getExpires() {
return expires;
}
/**
* @param expires the token expiration time
*/
public void setExpires(String expires) {
this.expires = expires;
}
/**
* @return token value
*/
public String getId() {
return id;
}
/**
* @param id token value
*/
public void setId(String id) {
this.id = id;
}
/**
* @return tenant authenticated in Openstack Keystone
*/
public Tenant getTenant() {
return tenant;
}
/**
* @param tenant tenant authenticated in Openstack Keystone
*/
public void setTenant(Tenant tenant) {
this.tenant = tenant;
}
@Override
public String toString() {
return "AccessToken{" +
"id='" + id + '\'' +
", tenant=" + tenant +
", expires='" + expires + '\'' +
'}';
}
}
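/*
 * Parsing sketch (not original source): deserializing the sample JSON from
 * the class javadoc with the module's Jackson 1.x mapper. Unknown fields
 * such as "RAX-AUTH:authenticatedBy" are skipped because of
 * @JsonIgnoreProperties(ignoreUnknown = true).
 *
 *   AccessToken token = new org.codehaus.jackson.map.ObjectMapper()
 *       .readValue(jsonText, AccessToken.class);
 *   token.getId();                  // "8bbea4215113abdab9d4c8fb0d37"
 *   token.getTenant().getName();    // "77777"
 */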
| 2,570 | 22.805556 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/User.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.auth.entities;
import org.apache.hadoop.fs.swift.auth.Roles;
import org.codehaus.jackson.annotate.JsonIgnoreProperties;
import java.util.List;
/**
* Describes the user entity in Keystone.
* Different Swift installations represent the user differently;
* to avoid JSON deserialization failures, unknown properties are ignored.
* THIS FILE IS MAPPED BY JACKSON TO AND FROM JSON.
* DO NOT RENAME OR MODIFY FIELDS AND THEIR ACCESSORS.
*/
@JsonIgnoreProperties(ignoreUnknown = true)
public class User {
/**
* user id in Keystone
*/
private String id;
/**
* user human readable name
*/
private String name;
/**
* user roles in Keystone
*/
private List<Roles> roles;
/**
* links to user roles
*/
private List<Object> roles_links;
/**
* human readable username in Keystone
*/
private String username;
/**
* @return user id
*/
public String getId() {
return id;
}
/**
* @param id user id
*/
public void setId(String id) {
this.id = id;
}
/**
* @return user name
*/
public String getName() {
return name;
}
/**
* @param name user name
*/
public void setName(String name) {
this.name = name;
}
/**
* @return user roles
*/
public List<Roles> getRoles() {
return roles;
}
/**
* @param roles sets user roles
*/
public void setRoles(List<Roles> roles) {
this.roles = roles;
}
/**
* @return user roles links
*/
public List<Object> getRoles_links() {
return roles_links;
}
/**
* @param roles_links user roles links
*/
public void setRoles_links(List<Object> roles_links) {
this.roles_links = roles_links;
}
/**
* @return username
*/
public String getUsername() {
return username;
}
/**
* @param username human readable user name
*/
public void setUsername(String username) {
this.username = username;
}
}
| 2,761 | 19.766917 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/DurationStatsTable.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.util;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Build a duration stats table to which you can add statistics.
* Designed to be used safely from multiple threads.
*/
public class DurationStatsTable {
private Map<String,DurationStats> statsTable
= new HashMap<String, DurationStats>(6);
/**
* Add an operation's duration
* @param operation operation name
* @param duration duration
* @param success whether the operation succeeded; failures are
* recorded under the key {@code operation + "-FAIL"}
*/
public void add(String operation, Duration duration, boolean success) {
DurationStats durationStats;
String key = operation;
if (!success) {
key += "-FAIL";
}
synchronized (this) {
durationStats = statsTable.get(key);
if (durationStats == null) {
durationStats = new DurationStats(key);
statsTable.put(key, durationStats);
}
}
synchronized (durationStats) {
durationStats.add(duration);
}
}
/**
* Get the current duration statistics
* @return a snapshot of the statistics
*/
public synchronized List<DurationStats> getDurationStatistics() {
List<DurationStats> results = new ArrayList<DurationStats>(statsTable.size());
for (DurationStats stat: statsTable.values()) {
results.add(new DurationStats(stat));
}
return results;
}
/**
* reset the values of the statistics. This doesn't delete them, merely zeroes them.
*/
public synchronized void reset() {
for (DurationStats stat : statsTable.values()) {
stat.reset();
}
}
}
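/*
 * Usage sketch (illustrative; doSomething() is a hypothetical operation):
 *
 *   DurationStatsTable table = new DurationStatsTable();
 *   Duration d = new Duration();     // construction starts the clock
 *   boolean success = doSomething();
 *   d.finished();                    // stop the clock
 *   table.add("GET", d, success);    // failures are keyed as "GET-FAIL"
 */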
| 2,357 | 29.230769 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.util;
import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.FileNotFoundException;
import java.io.IOException;
/**
* Various utility methods for SwiftFS support
*/
public final class SwiftUtils {
public static final String READ = "read(buffer, offset, length)";
/**
* Join two (non null) paths, inserting a forward slash between them
* if needed
*
* @param path1 first path
* @param path2 second path
* @return the combined path
*/
public static String joinPaths(String path1, String path2) {
StringBuilder result =
new StringBuilder(path1.length() + path2.length() + 1);
result.append(path1);
boolean insertSlash = true;
if (path1.endsWith("/")) {
insertSlash = false;
} else if (path2.startsWith("/")) {
insertSlash = false;
}
if (insertSlash) {
result.append("/");
}
result.append(path2);
return result.toString();
}
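// Behavior examples derived from the logic above (illustrative only):
//   joinPaths("a", "b")   -> "a/b"
//   joinPaths("a/", "b")  -> "a/b"
//   joinPaths("a", "/b")  -> "a/b"
//   joinPaths("a/", "/b") -> "a//b" (a slash is only inserted, never removed)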
/**
* This test contains the is-directory logic for Swift, so that if
* it changes there is only one place to update.
*
* @param fileStatus status to examine
* @return true if we consider this status to be representative of a
* directory.
*/
public static boolean isDirectory(FileStatus fileStatus) {
return fileStatus.isDirectory() || isFilePretendingToBeDirectory(fileStatus);
}
/**
* Test for the entry being a file that is treated as if it is a
* directory
*
* @param fileStatus status
* @return true if it meets the rules for being a directory
*/
public static boolean isFilePretendingToBeDirectory(FileStatus fileStatus) {
return fileStatus.getLen() == 0;
}
/**
* Predicate: Is a swift object referring to the root directory?
* @param swiftObject object to probe
* @return true iff the object refers to the root
*/
public static boolean isRootDir(SwiftObjectPath swiftObject) {
return swiftObject.objectMatches("") || swiftObject.objectMatches("/");
}
/**
* Sprintf() to the log iff the log is at debug level. If the log
* is not at debug level, the printf operation is skipped, so
* no time is spent generating the string.
* @param log log to use
* @param text text message
* @param args args arguments to the print statement
*/
public static void debug(Log log, String text, Object... args) {
if (log.isDebugEnabled()) {
log.debug(String.format(text, args));
}
}
/**
* Log an exception (in text and trace) iff the log is at debug
* @param log Log to use
* @param text text message
* @param ex exception
*/
public static void debugEx(Log log, String text, Exception ex) {
if (log.isDebugEnabled()) {
log.debug(text + ex, ex);
}
}
/**
* Sprintf() to the log iff the log is at trace level. If the log
* is not at trace level, the printf operation is skipped, so
* no time is spent generating the string.
* @param log log to use
* @param text text message
* @param args args arguments to the print statement
*/
public static void trace(Log log, String text, Object... args) {
if (log.isTraceEnabled()) {
log.trace(String.format(text, args));
}
}
/**
* Given a partition number, calculate the partition value.
* This is used in the SwiftNativeOutputStream, and is placed
* here for tests to be able to calculate the filename of
* a partition.
* @param partNumber part number
* @return a string to use as the filename
*/
public static String partitionFilenameFromNumber(int partNumber) {
return String.format("%06d", partNumber);
}
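// e.g. partitionFilenameFromNumber(3) returns "000003" (illustrative)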
/**
* List a path as a string
* @param fileSystem filesystem
* @param path directory
* @return a listing of the filestatuses of elements in the directory, one
* to a line, preceded by the full path of the directory
* @throws IOException connectivity problems
*/
public static String ls(FileSystem fileSystem, Path path) throws
IOException {
if (path == null) {
//surfaces when someone calls getParent() on something at the top of the path
return "/";
}
FileStatus[] stats;
String pathtext = "ls " + path;
try {
stats = fileSystem.listStatus(path);
} catch (FileNotFoundException e) {
return pathtext + " -file not found";
} catch (IOException e) {
return pathtext + " -failed: " + e;
}
return pathtext + fileStatsToString(stats, "\n");
}
/**
* Take an array of filestats and convert to a string, each entry prefixed with a [01]-style counter
* @param stats array of stats
* @param separator separator after every entry
* @return a stringified set
*/
public static String fileStatsToString(FileStatus[] stats, String separator) {
StringBuilder buf = new StringBuilder(stats.length * 128);
for (int i = 0; i < stats.length; i++) {
buf.append(String.format("[%02d] %s", i, stats[i])).append(separator);
}
return buf.toString();
}
/**
* Verify that the basic args to a read operation are valid;
* throws an exception if not -with meaningful text including the invalid values
* @param buffer destination buffer
* @param off offset
* @param len number of bytes to read
* @throws NullPointerException null buffer
* @throws IndexOutOfBoundsException on any invalid range.
*/
public static void validateReadArgs(byte[] buffer, int off, int len) {
if (buffer == null) {
throw new NullPointerException("Null byte array in " + READ);
}
if (off < 0) {
throw new IndexOutOfBoundsException("Negative buffer offset "
+ off
+ " in " + READ);
}
if (len < 0) {
throw new IndexOutOfBoundsException("Negative read length "
+ len
+ " in " + READ);
}
if (off > buffer.length) {
throw new IndexOutOfBoundsException("Buffer offset of "
+ off
+ "beyond buffer size of "
+ buffer.length
+ " in " + READ);
}
}
}
| 7,231 | 32.327189 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/Duration.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.util;
public class Duration {
private final long started;
private long finished;
public Duration() {
started = time();
finished = started;
}
private long time() {
return System.currentTimeMillis();
}
public void finished() {
finished = time();
}
public String getDurationString() {
return humanTime(value());
}
public static String humanTime(long time) {
long seconds = (time / 1000);
long minutes = (seconds / 60);
return String.format("%d:%02d:%03d", minutes, seconds % 60, time % 1000);
}
@Override
public String toString() {
return getDurationString();
}
public long value() {
return finished - started;
}
}
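/*
 * Illustrative: humanTime(65432) -> "1:05:432"
 * (1 minute, 5 seconds, 432 milliseconds).
 */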
| 1,549 | 25.724138 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/JSONUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.util;
import org.apache.hadoop.fs.swift.exceptions.SwiftJsonMarshallingException;
import org.codehaus.jackson.JsonGenerationException;
import org.codehaus.jackson.map.JsonMappingException;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.type.CollectionType;
import org.codehaus.jackson.type.TypeReference;
import java.io.IOException;
import java.io.StringWriter;
import java.io.Writer;
public class JSONUtil {
private static ObjectMapper jsonMapper = new ObjectMapper();
/**
* Private constructor.
*/
private JSONUtil() {
}
/**
* Convert an object to a JSON string. If an error occurs, a
* SwiftJsonMarshallingException is thrown.
*
* @param object The object to convert.
* @return The JSON string representation.
* @throws IOException IO issues
* @throws SwiftJsonMarshallingException failure to generate JSON
*/
public static String toJSON(Object object) throws
IOException {
Writer json = new StringWriter();
try {
jsonMapper.writeValue(json, object);
return json.toString();
} catch (JsonGenerationException e) {
throw new SwiftJsonMarshallingException(e.toString(), e);
} catch (JsonMappingException e) {
throw new SwiftJsonMarshallingException(e.toString(), e);
}
}
/**
* Convert a JSON string representation to an object. If an error occurs,
* a SwiftJsonMarshallingException is thrown.
*
* @param value The JSON string.
* @param klazz The class to convert.
* @return The Object of the given class.
*/
public static <T> T toObject(String value, Class<T> klazz) throws
IOException {
try {
return jsonMapper.readValue(value, klazz);
} catch (JsonGenerationException e) {
throw new SwiftJsonMarshallingException(e.toString()
+ " source: " + value,
e);
} catch (JsonMappingException e) {
throw new SwiftJsonMarshallingException(e.toString()
+ " source: " + value,
e);
}
}
/**
* @param value json string
* @param typeReference class type reference
* @param <T> type
* @return deserialized T object
*/
@SuppressWarnings("unchecked")
public static <T> T toObject(String value,
final TypeReference<T> typeReference)
throws IOException {
try {
return (T)jsonMapper.readValue(value, typeReference);
} catch (JsonGenerationException e) {
throw new SwiftJsonMarshallingException("Error generating response", e);
} catch (JsonMappingException e) {
throw new SwiftJsonMarshallingException("Error generating response", e);
}
}
/**
* @param value json string
* @param collectionType class describing how to deserialize collection of objects
* @param <T> type
* @return deserialized T object
*/
@SuppressWarnings("unchecked")
public static <T> T toObject(String value,
final CollectionType collectionType)
throws IOException {
try {
return (T)jsonMapper.readValue(value, collectionType);
} catch (JsonGenerationException e) {
throw new SwiftJsonMarshallingException(e.toString()
+ " source: " + value,
e);
} catch (JsonMappingException e) {
throw new SwiftJsonMarshallingException(e.toString()
+ " source: " + value,
e);
}
}
public static ObjectMapper getJsonMapper() {
return jsonMapper;
}
}
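/*
 * Round-trip sketch (not original source; PasswordCredentials from
 * org.apache.hadoop.fs.swift.auth is reused here purely for illustration):
 *
 *   PasswordCredentials creds = new PasswordCredentials("demo", "secret");
 *   String json = JSONUtil.toJSON(creds);
 *   PasswordCredentials back = JSONUtil.toObject(json, PasswordCredentials.class);
 *   // back.getUsername().equals("demo") holds
 */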
| 4,713 | 34.443609 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftObjectPath.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.util;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.exceptions.SwiftConfigurationException;
import org.apache.hadoop.fs.swift.http.RestClientBindings;
import java.net.URI;
import java.util.regex.Pattern;
/**
* Swift hierarchy mapping of (container, path)
*/
public final class SwiftObjectPath {
private static final Pattern PATH_PART_PATTERN = Pattern.compile(".*/AUTH_\\w*/");
/**
* Swift container
*/
private final String container;
/**
* swift object
*/
private final String object;
private final String uriPath;
/**
* Build an instance from a (container, object) pair
*
* @param container container name
* @param object object ref underneath the container
*/
public SwiftObjectPath(String container, String object) {
if (object == null) {
throw new IllegalArgumentException("object name can't be null");
}
this.container = container;
this.object = URI.create(object).getPath();
uriPath = buildUriPath();
}
public String getContainer() {
return container;
}
public String getObject() {
return object;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof SwiftObjectPath)) return false;
final SwiftObjectPath that = (SwiftObjectPath) o;
return this.toUriPath().equals(that.toUriPath());
}
@Override
public int hashCode() {
int result = container.hashCode();
result = 31 * result + object.hashCode();
return result;
}
private String buildUriPath() {
return SwiftUtils.joinPaths(container, object);
}
public String toUriPath() {
return uriPath;
}
@Override
public String toString() {
return toUriPath();
}
/**
* Test for the object matching a path, ignoring the container
* value.
*
* @param path path string
* @return true iff the object's name matches the path
*/
public boolean objectMatches(String path) {
return object.equals(path);
}
/**
* Query to see if the possibleChild object is a child path of this
* object.
*
* The test is done by probing for the path of this object being
* at the start of the second's -with a trailing slash- and both
* containers being equal
*
* @param possibleChild possible child dir
* @return true iff the possibleChild is under this object
*/
public boolean isEqualToOrParentOf(SwiftObjectPath possibleChild) {
String origPath = toUriPath();
String path = origPath;
if (!path.endsWith("/")) {
path = path + "/";
}
String childPath = possibleChild.toUriPath();
return childPath.equals(origPath) || childPath.startsWith(path);
}
/**
* Create a path tuple of (container, path), where the container is
* chosen from the host of the URI.
*
* @param uri uri to start from
* @param path path underneath
* @return a new instance.
* @throws SwiftConfigurationException if the URI host doesn't parse into
* container.service
*/
public static SwiftObjectPath fromPath(URI uri,
Path path)
throws SwiftConfigurationException {
return fromPath(uri, path, false);
}
/**
* Create a path tuple of (container, path), where the container is
* chosen from the host of the URI.
* A trailing slash can be added to the path. This is the point where
* these /-es need to be appended, because when you construct a {@link Path}
* instance, {@link Path#normalizePath(String, String)} is called
* -which strips off any trailing slash.
*
* @param uri uri to start from
* @param path path underneath
* @param addTrailingSlash should a trailing slash be added if there isn't one.
* @return a new instance.
* @throws SwiftConfigurationException if the URI host doesn't parse into
* container.service
*/
public static SwiftObjectPath fromPath(URI uri,
Path path,
boolean addTrailingSlash)
throws SwiftConfigurationException {
String url =
path.toUri().getPath().replaceAll(PATH_PART_PATTERN.pattern(), "");
//add a trailing slash if needed
if (addTrailingSlash && !url.endsWith("/")) {
url += "/";
}
String container = uri.getHost();
if (container == null) {
//no container, not good: replace with ""
container = "";
} else if (container.contains(".")) {
//it's a container.service URI: extract the container name
container = RestClientBindings.extractContainerName(container);
}
return new SwiftObjectPath(container, url);
}
}
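/*
 * Usage sketch (values are assumptions for illustration; the exact container
 * name extracted by RestClientBindings from a "container.service" host is
 * represented as <container> below):
 *
 *   URI fsUri = URI.create("swift://data.rackspace/");
 *   Path p = new Path("/logs/2013/07/12.log");
 *   SwiftObjectPath sop = SwiftObjectPath.fromPath(fsUri, p);
 *   // sop.getObject()  -> "/logs/2013/07/12.log"
 *   // sop.toUriPath()  -> "<container>/logs/2013/07/12.log"
 */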
| 5,609 | 28.840426 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.exceptions.SwiftConfigurationException;
import org.junit.internal.AssumptionViolatedException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Properties;
/**
* Utilities used across test cases
*/
public class SwiftTestUtils extends org.junit.Assert {
private static final Log LOG =
LogFactory.getLog(SwiftTestUtils.class);
public static final String TEST_FS_SWIFT = "test.fs.swift.name";
public static final String IO_FILE_BUFFER_SIZE = "io.file.buffer.size";
/**
* Get the test URI
* @param conf configuration
* @return the test filesystem URI
* @throws SwiftConfigurationException missing parameter or bad URI
*/
public static URI getServiceURI(Configuration conf) throws
SwiftConfigurationException {
String instance = conf.get(TEST_FS_SWIFT);
if (instance == null) {
throw new SwiftConfigurationException(
"Missing configuration entry " + TEST_FS_SWIFT);
}
try {
return new URI(instance);
} catch (URISyntaxException e) {
throw new SwiftConfigurationException("Bad URI: " + instance);
}
}
public static boolean hasServiceURI(Configuration conf) {
String instance = conf.get(TEST_FS_SWIFT);
return instance != null;
}
/**
* Assert that a property in the property set matches the expected value
* @param props property set
* @param key property name
* @param expected expected value. If null, the property must not be in the set
*/
public static void assertPropertyEquals(Properties props,
String key,
String expected) {
String val = props.getProperty(key);
if (expected == null) {
assertNull("Non null property " + key + " = " + val, val);
} else {
assertEquals("property " + key + " = " + val,
expected,
val);
}
}
/**
*
* Write a file and read it in, validating the result. Optional flags control
* whether file overwrite operations should be enabled, and whether the
* file should be deleted afterwards.
*
* If there is a mismatch between what was written and what was expected,
* a small range of bytes either side of the first error are logged to aid
* diagnosing what problem occurred -whether it was a previous file
* or a corrupting of the current file. This assumes that two
* sequential runs to the same path use datasets with different character
* moduli.
*
* @param fs filesystem
* @param path path to write to
* @param len length of data
* @param overwrite should the create option allow overwrites?
* @param delete should the file be deleted afterwards? -with a verification
* that it worked. Deletion is not attempted if an assertion has failed
* earlier -it is not in a <code>finally{}</code> block.
* @throws IOException IO problems
*/
public static void writeAndRead(FileSystem fs,
Path path,
byte[] src,
int len,
int blocksize,
boolean overwrite,
boolean delete) throws IOException {
fs.mkdirs(path.getParent());
writeDataset(fs, path, src, len, blocksize, overwrite);
byte[] dest = readDataset(fs, path, len);
compareByteArrays(src, dest, len);
if (delete) {
boolean deleted = fs.delete(path, false);
assertTrue("Deleted", deleted);
assertPathDoesNotExist(fs, "Cleanup failed", path);
}
}
/**
* Write a file.
* Optional flags control
* whether file overwrite operations should be enabled
* @param fs filesystem
* @param path path to write to
* @param len length of data
* @param overwrite should the create option allow overwrites?
* @throws IOException IO problems
*/
public static void writeDataset(FileSystem fs,
Path path,
byte[] src,
int len,
int blocksize,
boolean overwrite) throws IOException {
assertTrue(
"Not enough data in source array to write " + len + " bytes",
src.length >= len);
FSDataOutputStream out = fs.create(path,
overwrite,
fs.getConf()
.getInt(IO_FILE_BUFFER_SIZE,
4096),
(short) 1,
blocksize);
out.write(src, 0, len);
out.close();
assertFileHasLength(fs, path, len);
}
/**
* Read the file and convert to a byte dataset
* @param fs filesystem
* @param path path to read from
* @param len length of data to read
* @return the bytes
* @throws IOException IO problems
*/
public static byte[] readDataset(FileSystem fs, Path path, int len)
throws IOException {
FSDataInputStream in = fs.open(path);
byte[] dest = new byte[len];
try {
in.readFully(0, dest);
} finally {
in.close();
}
return dest;
}
/**
* Assert that the arrays src[0..len-1] and dest[] are equal
* @param src source data
* @param dest actual
* @param len length of bytes to compare
*/
public static void compareByteArrays(byte[] src,
byte[] dest,
int len) {
assertEquals("Number of bytes read != number written",
len, dest.length);
int errors = 0;
int first_error_byte = -1;
for (int i = 0; i < len; i++) {
if (src[i] != dest[i]) {
if (errors == 0) {
first_error_byte = i;
}
errors++;
}
}
if (errors > 0) {
String message = String.format(" %d errors in file of length %d",
errors, len);
LOG.warn(message);
// the range either side of the first error to print
// this is a purely arbitrary number, to aid user debugging
final int overlap = 10;
for (int i = Math.max(0, first_error_byte - overlap);
i < Math.min(first_error_byte + overlap, len);
i++) {
byte actual = dest[i];
byte expected = src[i];
String letter = toChar(actual);
String line = String.format("[%04d] %2x %s%n", i, actual, letter);
if (expected != actual) {
line = String.format("[%04d] %2x %s -expected %2x %s%n",
i,
actual,
letter,
expected,
toChar(expected));
}
LOG.warn(line);
}
fail(message);
}
}
/**
* Convert a byte to a character for printing. If the
* byte value is < 32 -and hence unprintable- the byte is
* returned as a two digit hex value
* @param b byte
* @return the printable character string
*/
public static String toChar(byte b) {
if (b >= 0x20) {
return Character.toString((char) b);
} else {
return String.format("%02x", b);
}
}
public static String toChar(byte[] buffer) {
StringBuilder builder = new StringBuilder(buffer.length);
for (byte b : buffer) {
builder.append(toChar(b));
}
return builder.toString();
}
public static byte[] toAsciiByteArray(String s) {
char[] chars = s.toCharArray();
int len = chars.length;
byte[] buffer = new byte[len];
for (int i = 0; i < len; i++) {
buffer[i] = (byte) (chars[i] & 0xff);
}
return buffer;
}
public static void cleanupInTeardown(FileSystem fileSystem,
String cleanupPath) {
cleanup("TEARDOWN", fileSystem, cleanupPath);
}
public static void cleanup(String action,
FileSystem fileSystem,
String cleanupPath) {
noteAction(action);
try {
if (fileSystem != null) {
fileSystem.delete(new Path(cleanupPath).makeQualified(fileSystem),
true);
}
} catch (Exception e) {
LOG.error("Error deleting in "+ action + " - " + cleanupPath + ": " + e, e);
}
}
public static void noteAction(String action) {
if (LOG.isDebugEnabled()) {
LOG.debug("============== "+ action +" =============");
}
}
/**
* downgrade a failure to a message and a warning, then an
* exception for the Junit test runner to mark as failed
* @param message text message
* @param failure what failed
* @throws AssumptionViolatedException always
*/
public static void downgrade(String message, Throwable failure) {
LOG.warn("Downgrading test " + message, failure);
AssumptionViolatedException ave =
new AssumptionViolatedException(failure, null);
throw ave;
}
/**
* report an overridden test as unsupported
* @param message message to use in the text
* @throws AssumptionViolatedException always
*/
public static void unsupported(String message) {
throw new AssumptionViolatedException(message);
}
/**
* report a test has been skipped for some reason
* @param message message to use in the text
* @throws AssumptionViolatedException always
*/
public static void skip(String message) {
throw new AssumptionViolatedException(message);
}
/**
* Make an assertion about the length of a file
* @param fs filesystem
* @param path path of the file
* @param expected expected length
* @throws IOException on File IO problems
*/
public static void assertFileHasLength(FileSystem fs, Path path,
int expected) throws IOException {
FileStatus status = fs.getFileStatus(path);
assertEquals(
"Wrong file length of file " + path + " status: " + status,
expected,
status.getLen());
}
/**
* Assert that a path refers to a directory
* @param fs filesystem
* @param path path of the directory
* @throws IOException on File IO problems
*/
public static void assertIsDirectory(FileSystem fs,
Path path) throws IOException {
FileStatus fileStatus = fs.getFileStatus(path);
assertIsDirectory(fileStatus);
}
/**
* Assert that a path refers to a directory
* @param fileStatus stats to check
*/
public static void assertIsDirectory(FileStatus fileStatus) {
assertTrue("Should be a dir -but isn't: " + fileStatus,
fileStatus.isDirectory());
}
/**
* Write the text to a file, returning the converted byte array
* for use in validating the round trip
* @param fs filesystem
* @param path path of file
* @param text text to write
* @param overwrite should the operation overwrite any existing file?
* @return the read bytes
* @throws IOException on IO problems
*/
public static byte[] writeTextFile(FileSystem fs,
Path path,
String text,
boolean overwrite) throws IOException {
FSDataOutputStream stream = fs.create(path, overwrite);
byte[] bytes = new byte[0];
if (text != null) {
bytes = toAsciiByteArray(text);
stream.write(bytes);
}
stream.close();
return bytes;
}
/**
* Touch a file: any existing entry is deleted first, then a zero-byte file is created
* @param fs filesystem
* @param path path
* @throws IOException IO problems
*/
public static void touch(FileSystem fs,
Path path) throws IOException {
fs.delete(path, true);
writeTextFile(fs, path, null, false);
}
public static void assertDeleted(FileSystem fs,
Path file,
boolean recursive) throws IOException {
assertPathExists(fs, "about to be deleted file", file);
boolean deleted = fs.delete(file, recursive);
String dir = ls(fs, file.getParent());
assertTrue("Delete failed on " + file + ": " + dir, deleted);
assertPathDoesNotExist(fs, "Deleted file", file);
}
/**
* Read in "length" bytes, convert to an ascii string
* @param fs filesystem
* @param path path to read
* @param length #of bytes to read.
* @return the bytes read and converted to a string
* @throws IOException
*/
public static String readBytesToString(FileSystem fs,
Path path,
int length) throws IOException {
FSDataInputStream in = fs.open(path);
try {
byte[] buf = new byte[length];
in.readFully(0, buf);
return toChar(buf);
} finally {
in.close();
}
}
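/**
* A minimal round-trip sketch (illustrative; the path and text used here
* are hypothetical): write a string with writeTextFile(), read it back
* with readBytesToString() and verify the two match.
* @param fs filesystem to write to and read from
* @throws IOException on IO problems
*/
public static void demoTextRoundTrip(FileSystem fs) throws IOException {
Path p = new Path("/test/roundtrip.txt");
byte[] written = writeTextFile(fs, p, "hello swift", true);
String readBack = readBytesToString(fs, p, written.length);
assertEquals("Round trip mismatch", "hello swift", readBack);
}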
public static String getDefaultWorkingDirectory() {
return "/user/" + System.getProperty("user.name");
}
public static String ls(FileSystem fileSystem, Path path) throws IOException {
return SwiftUtils.ls(fileSystem, path);
}
public static String dumpStats(String pathname, FileStatus[] stats) {
return pathname + SwiftUtils.fileStatsToString(stats,"\n");
}
/**
* Assert that a file exists and whose {@link FileStatus} entry
* declares that this is a file and not a symlink or directory.
* @param fileSystem filesystem to resolve path against
* @param filename name of the file
* @throws IOException IO problems during file operations
*/
public static void assertIsFile(FileSystem fileSystem, Path filename) throws
IOException {
assertPathExists(fileSystem, "Expected file", filename);
FileStatus status = fileSystem.getFileStatus(filename);
String fileInfo = filename + " " + status;
assertFalse("File claims to be a directory " + fileInfo,
status.isDirectory());
/* disabled for Hadoop v1 compatibility
assertFalse("File claims to be a symlink " + fileInfo,
status.isSymlink());
*/
}
/**
* Create a dataset for use in the tests; all data is in the range
* base to (base+modulo-1) inclusive
* @param len length of data
* @param base base of the data
* @param modulo the modulo
* @return the newly generated dataset
*/
public static byte[] dataset(int len, int base, int modulo) {
byte[] dataset = new byte[len];
for (int i = 0; i < len; i++) {
dataset[i] = (byte) (base + (i % modulo));
}
return dataset;
}
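//for illustration: dataset(6, 'a', 3) yields {'a','b','c','a','b','c'},
//i.e. the values cycle from base up to base + modulo - 1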
/**
* Assert that a path exists -but make no assertions as to the
* type of that entry
*
* @param fileSystem filesystem to examine
* @param message message to include in the assertion failure message
* @param path path in the filesystem
* @throws IOException IO problems
*/
public static void assertPathExists(FileSystem fileSystem, String message,
Path path) throws IOException {
if (!fileSystem.exists(path)) {
//failure: grab a listing of the parent dir for diagnostics, then report
String listing = ls(fileSystem, path.getParent());
fail(message + ": not found " + path + " in " + path.getParent()
+ " - " + listing);
}
}
/**
* Assert that a path does not exist
*
* @param fileSystem filesystem to examine
* @param message message to include in the assertion failure message
* @param path path in the filesystem
* @throws IOException IO problems
*/
public static void assertPathDoesNotExist(FileSystem fileSystem,
String message,
Path path) throws IOException {
try {
FileStatus status = fileSystem.getFileStatus(path);
fail(message + ": unexpectedly found " + path + " as " + status);
} catch (FileNotFoundException expected) {
//this is expected
}
}
/**
* Assert that a FileSystem.listStatus on a dir finds the subdir/child entry
* @param fs filesystem
* @param dir directory to scan
* @param subdir full path to look for
* @throws IOException IO problems
*/
public static void assertListStatusFinds(FileSystem fs,
Path dir,
Path subdir) throws IOException {
FileStatus[] stats = fs.listStatus(dir);
boolean found = false;
StringBuilder builder = new StringBuilder();
for (FileStatus stat : stats) {
builder.append(stat.toString()).append('\n');
if (stat.getPath().equals(subdir)) {
found = true;
}
}
assertTrue("Path " + subdir
+ " not found in directory " + dir + ":" + builder,
found);
}
}
| 18,219 | 32.431193 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/DurationStats.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.util;
/**
* Build ongoing statistics from duration data
*/
public class DurationStats {
final String operation;
int n;
long sum;
long min;
long max;
double mean, m2;
/**
* Construct statistics for a given operation.
* @param operation operation
*/
public DurationStats(String operation) {
this.operation = operation;
reset();
}
/**
* Construct from another stats entry;
* all values are copied.
* @param that the source statistics
*/
public DurationStats(DurationStats that) {
operation = that.operation;
n = that.n;
sum = that.sum;
min = that.min;
max = that.max;
mean = that.mean;
m2 = that.m2;
}
/**
* Add a duration
* @param duration the new duration
*/
public void add(Duration duration) {
add(duration.value());
}
/**
* Add a number
* @param x the number
*/
public void add(long x) {
//Welford's online algorithm: update the count, running mean and M2
//(sum of squared deviations from the mean) in a single pass
n++;
sum += x;
double delta = x - mean;
mean += delta / n;
m2 += delta * (x - mean);
if (x < min) {
min = x;
}
if (x > max) {
max = x;
}
}
/**
* Reset the data
*/
public void reset() {
n = 0;
sum = 0;
//sentinel so that the first sample always becomes the new minimum
min = Long.MAX_VALUE;
max = 0;
mean = 0;
m2 = 0;
}
/**
* Get the number of entries sampled
* @return the number of durations added
*/
public int getCount() {
return n;
}
/**
* Get the sum of all durations
* @return all the durations
*/
public long getSum() {
return sum;
}
/**
* Get the arithmetic mean of the aggregate statistics
* @return the arithmetic mean
*/
public double getArithmeticMean() {
return mean;
}
/**
* Variance, sigma^2
* @return the sample variance, or 0 if there are fewer than two samples
*/
public double getVariance() {
return n > 1 ? (m2 / (n - 1)) : 0;
}
/**
* Get the std deviation, sigma
* @return the stddev, 0 may mean there are no samples.
*/
public double getDeviation() {
double variance = getVariance();
return (variance > 0) ? Math.sqrt(variance) : 0;
}
/**
* Convert to a useful string
* @return a human readable summary
*/
@Override
public String toString() {
return String.format(
"%s count=%d total=%.3fs mean=%.3fs stddev=%.3fs min=%.3fs max=%.3fs",
operation,
n,
sum / 1000.0,
mean / 1000.0,
getDeviation() / 1000.0,
min / 1000.0,
max / 1000.0);
}
}
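/**
* A minimal usage sketch (illustrative; the class name and sample timings
* are hypothetical): feed millisecond durations into the running
* statistics and print the summary.
*/
class DurationStatsDemo {
public static void main(String[] args) {
DurationStats stats = new DurationStats("GET");
//three hypothetical request timings, in milliseconds
for (long ms : new long[]{120, 80, 100}) {
stats.add(ms);
}
//prints e.g. "GET count=3 total=0.300s mean=0.100s stddev=0.020s ..."
System.out.println(stats);
}
}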
| 3,307 | 20.341935 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftObjectFileStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.snative;
import java.util.Date;
/**
* Java mapping of Swift JSON file status.
* THIS FILE IS MAPPED BY JACKSON TO AND FROM JSON.
* DO NOT RENAME OR MODIFY FIELDS AND THEIR ACCESSORS.
*/
class SwiftObjectFileStatus {
private long bytes;
private String content_type;
private String hash;
private Date last_modified;
private String name;
private String subdir;
SwiftObjectFileStatus() {
}
SwiftObjectFileStatus(long bytes, String content_type, String hash,
Date last_modified, String name) {
this.bytes = bytes;
this.content_type = content_type;
this.hash = hash;
this.last_modified = last_modified;
this.name = name;
}
public long getBytes() {
return bytes;
}
public void setBytes(long bytes) {
this.bytes = bytes;
}
public String getContent_type() {
return content_type;
}
public void setContent_type(String content_type) {
this.content_type = content_type;
}
public String getHash() {
return hash;
}
public void setHash(String hash) {
this.hash = hash;
}
public Date getLast_modified() {
return last_modified;
}
public void setLast_modified(Date last_modified) {
this.last_modified = last_modified;
}
public String getName() {
return pathToRootPath(name);
}
public void setName(String name) {
this.name = name;
}
public String getSubdir() {
return pathToRootPath(subdir);
}
public void setSubdir(String subdir) {
this.subdir = subdir;
}
/**
* If the path doesn't start with '/',
* this method will prepend one
*
* @param path specified path
* @return root path string
*/
private String pathToRootPath(String path) {
if (path == null) {
return null;
}
if (path.startsWith("/")) {
return path;
}
return "/".concat(path);
}
}
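/**
* A minimal sketch (illustrative; the JSON sample and class name are
* hypothetical) of how Jackson binds a Swift listing response onto this
* class, mirroring the collection-type construction used by the
* filesystem store.
*/
class SwiftObjectFileStatusDemo {
public static void main(String[] args) throws java.io.IOException {
String json = "[{\"name\": \"data/part-0\", \"bytes\": 1024,"
+ " \"content_type\": \"application/octet-stream\"}]";
org.codehaus.jackson.map.ObjectMapper mapper =
new org.codehaus.jackson.map.ObjectMapper();
java.util.List<SwiftObjectFileStatus> statuses = mapper.readValue(json,
mapper.getTypeFactory().constructCollectionType(
java.util.List.class, SwiftObjectFileStatus.class));
//getName() forces a leading '/', so this prints "/data/part-0"
System.out.println(statuses.get(0).getName());
}
}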
| 2,696 | 22.25 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.snative;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.exceptions.SwiftConnectionClosedException;
import org.apache.hadoop.fs.swift.exceptions.SwiftException;
import org.apache.hadoop.fs.swift.exceptions.SwiftInternalStateException;
import org.apache.hadoop.fs.swift.util.SwiftUtils;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
/**
* Output stream, buffers data on local disk.
* Writes to Swift on the close() method, unless the
* file is large enough to be written as partitions.
* In that case, the first partition is written on the first write that puts
* data over the partition size, as may later writes. The close() then causes
* the final partition to be written, along with a partition manifest.
*/
class SwiftNativeOutputStream extends OutputStream {
public static final int ATTEMPT_LIMIT = 3;
private long filePartSize;
private static final Log LOG =
LogFactory.getLog(SwiftNativeOutputStream.class);
private Configuration conf;
private String key;
private File backupFile;
private OutputStream backupStream;
private SwiftNativeFileSystemStore nativeStore;
private boolean closed;
private int partNumber;
private long blockOffset;
private long bytesWritten;
private long bytesUploaded;
private boolean partUpload = false;
final byte[] oneByte = new byte[1];
/**
* Create an output stream
* @param conf configuration to use
* @param nativeStore native store to write through
* @param key the key to write
* @param partSizeKB the partition size
* @throws IOException
*/
@SuppressWarnings("IOResourceOpenedButNotSafelyClosed")
public SwiftNativeOutputStream(Configuration conf,
SwiftNativeFileSystemStore nativeStore,
String key,
long partSizeKB) throws IOException {
this.conf = conf;
this.key = key;
this.backupFile = newBackupFile();
this.nativeStore = nativeStore;
this.backupStream = new BufferedOutputStream(new FileOutputStream(backupFile));
this.partNumber = 1;
this.blockOffset = 0;
this.filePartSize = 1024L * partSizeKB;
}
private File newBackupFile() throws IOException {
File dir = new File(conf.get("hadoop.tmp.dir"));
if (!dir.mkdirs() && !dir.exists()) {
throw new SwiftException("Cannot create Swift buffer directory: " + dir);
}
File result = File.createTempFile("output-", ".tmp", dir);
result.deleteOnExit();
return result;
}
/**
* Flush the local backing stream.
* This does not trigger a flush of data to the remote blobstore.
* @throws IOException
*/
@Override
public void flush() throws IOException {
backupStream.flush();
}
/**
* check that the output stream is open
*
* @throws SwiftException if it is not
*/
private synchronized void verifyOpen() throws SwiftException {
if (closed) {
throw new SwiftConnectionClosedException();
}
}
/**
* Close the stream. This will trigger the upload of all locally cached
* data to the remote blobstore.
* @throws IOException IO problems uploading the data.
*/
@Override
public synchronized void close() throws IOException {
if (closed) {
return;
}
try {
closed = true;
//formally declare as closed.
backupStream.close();
backupStream = null;
Path keypath = new Path(key);
if (partUpload) {
partUpload(true);
nativeStore.createManifestForPartUpload(keypath);
} else {
uploadOnClose(keypath);
}
} finally {
delete(backupFile);
backupFile = null;
}
assert backupStream == null: "backup stream has been reopened";
}
/**
* Upload a file when closed, either in one go, or, if the file is
* already partitioned, by uploading the remaining partition and a manifest.
* @param keypath key as a path
* @throws IOException IO Problems
*/
private void uploadOnClose(Path keypath) throws IOException {
boolean uploadSuccess = false;
int attempt = 0;
while (!uploadSuccess) {
try {
++attempt;
bytesUploaded += uploadFileAttempt(keypath, attempt);
uploadSuccess = true;
} catch (IOException e) {
LOG.info("Upload failed " + e, e);
if (attempt > ATTEMPT_LIMIT) {
throw e;
}
}
}
}
@SuppressWarnings("IOResourceOpenedButNotSafelyClosed")
private long uploadFileAttempt(Path keypath, int attempt) throws IOException {
long uploadLen = backupFile.length();
SwiftUtils.debug(LOG, "Closing write of file %s;" +
" localfile=%s of length %d - attempt %d",
key,
backupFile,
uploadLen,
attempt);
nativeStore.uploadFile(keypath,
new FileInputStream(backupFile),
uploadLen);
return uploadLen;
}
@Override
protected void finalize() throws Throwable {
if(!closed) {
LOG.warn("stream not closed");
}
if (backupFile != null) {
LOG.warn("Leaking backing file " + backupFile);
}
}
private void delete(File file) {
if (file != null) {
SwiftUtils.debug(LOG, "deleting %s", file);
if (!file.delete()) {
LOG.warn("Could not delete " + file);
}
}
}
@Override
public void write(int b) throws IOException {
//insert to a one byte array
oneByte[0] = (byte) b;
//then delegate to the array writing routine
write(oneByte, 0, 1);
}
@Override
public synchronized void write(byte[] buffer, int offset, int len) throws
IOException {
//validate args
if (offset < 0 || len < 0 || (offset + len) > buffer.length) {
throw new IndexOutOfBoundsException("Invalid offset/length for write");
}
//validate the output stream
verifyOpen();
SwiftUtils.debug(LOG, " write(offset=%d, len=%d)", offset, len);
// if the write would take the buffered block past the partition limit
while (blockOffset + len >= filePartSize) {
// then partition the blob and upload as many partitions as are needed
//how many bytes to write for this partition.
int subWriteLen = (int) (filePartSize - blockOffset);
if (subWriteLen < 0 || subWriteLen > len) {
throw new SwiftInternalStateException("Invalid subwrite len: "
+ subWriteLen
+ " -buffer len: " + len);
}
writeToBackupStream(buffer, offset, subWriteLen);
//move the offset along and length down
offset += subWriteLen;
len -= subWriteLen;
//now upload the partition that has just been filled up
// (this also sets blockOffset=0)
partUpload(false);
}
//any remaining data is now written
writeToBackupStream(buffer, offset, len);
}
/**
* Write to the backup stream.
* Guarantees:
* <ol>
* <li>backupStream is open</li>
* <li>blockOffset + len < filePartSize</li>
* </ol>
* @param buffer buffer to write
* @param offset offset in buffer
* @param len length of write.
* @throws IOException backup stream write failing
*/
private void writeToBackupStream(byte[] buffer, int offset, int len) throws
IOException {
assert len >= 0 : "remainder to write is negative";
SwiftUtils.debug(LOG," writeToBackupStream(offset=%d, len=%d)", offset, len);
if (len == 0) {
//no remainder -downgrade to noop
return;
}
//write the new data out to the backup stream
backupStream.write(buffer, offset, len);
//increment the counters
blockOffset += len;
bytesWritten += len;
}
/**
* Upload a single partition. This deletes the local backing-file,
* and re-opens it to create a new one.
* @param closingUpload is this the final upload of the stream?
* @throws IOException on IO problems
*/
@SuppressWarnings("IOResourceOpenedButNotSafelyClosed")
private void partUpload(boolean closingUpload) throws IOException {
if (backupStream != null) {
backupStream.close();
}
if (closingUpload && partUpload && backupFile.length() == 0) {
//skipping the upload if
// - it is close time
// - the final partition is 0 bytes long
// - one part has already been written
SwiftUtils.debug(LOG, "skipping upload of 0 byte final partition");
delete(backupFile);
} else {
partUpload = true;
boolean uploadSuccess = false;
int attempt = 0;
while(!uploadSuccess) {
try {
++attempt;
bytesUploaded += uploadFilePartAttempt(attempt);
uploadSuccess = true;
} catch (IOException e) {
LOG.info("Upload failed " + e, e);
if (attempt > ATTEMPT_LIMIT) {
throw e;
}
}
}
delete(backupFile);
partNumber++;
blockOffset = 0;
if (!closingUpload) {
//if not the final upload, create a new output stream
backupFile = newBackupFile();
backupStream =
new BufferedOutputStream(new FileOutputStream(backupFile));
}
}
}
@SuppressWarnings("IOResourceOpenedButNotSafelyClosed")
private long uploadFilePartAttempt(int attempt) throws IOException {
long uploadLen = backupFile.length();
SwiftUtils.debug(LOG, "Uploading part %d of file %s;" +
" localfile=%s of length %d - attempt %d",
partNumber,
key,
backupFile,
uploadLen,
attempt);
nativeStore.uploadFilePart(new Path(key),
partNumber,
new FileInputStream(backupFile),
uploadLen);
return uploadLen;
}
/**
* Get the file partition size
* @return the partition size
*/
long getFilePartSize() {
return filePartSize;
}
/**
* Query the number of partitions written
* This is intended for testing
* @return the of partitions already written to the remote FS
*/
synchronized int getPartitionsWritten() {
return partNumber - 1;
}
/**
* Get the number of bytes written to the output stream.
* This should always be greater than or equal to bytesUploaded, as data
* is buffered locally before being uploaded.
* @return the number of bytes written to this stream
*/
long getBytesWritten() {
return bytesWritten;
}
/**
* Get the number of bytes uploaded to remote Swift cluster.
* bytesWritten - bytesUploaded = the number of bytes left to upload
* @return the number of bytes written to the remote endpoint
*/
long getBytesUploaded() {
return bytesUploaded;
}
@Override
public String toString() {
return "SwiftNativeOutputStream{" +
", key='" + key + '\'' +
", backupFile=" + backupFile +
", closed=" + closed +
", filePartSize=" + filePartSize +
", partNumber=" + partNumber +
", blockOffset=" + blockOffset +
", partUpload=" + partUpload +
", nativeStore=" + nativeStore +
", bytesWritten=" + bytesWritten +
", bytesUploaded=" + bytesUploaded +
'}';
}
}
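/**
* A sketch of the partition arithmetic in write() (illustrative; the
* class name and figures are hypothetical): with partSizeKB=1, every 1024
* buffered bytes trigger a partition upload, and the remainder is
* uploaded by close().
*/
class PartitionMathDemo {
public static void main(String[] args) {
long filePartSize = 1024L * 1; //partSizeKB = 1
long bytesToWrite = 2500;
long fullParts = bytesToWrite / filePartSize; //2 uploads during write()
long finalPartLen = bytesToWrite % filePartSize; //452 bytes on close()
System.out.println(fullParts + " partitions + " + finalPartLen
+ " bytes in the closing partition");
}
}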
| 12,604 | 31.320513 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.snative;
import org.apache.commons.httpclient.Header;
import org.apache.commons.httpclient.HttpStatus;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.exceptions.SwiftConfigurationException;
import org.apache.hadoop.fs.swift.exceptions.SwiftException;
import org.apache.hadoop.fs.swift.exceptions.SwiftInvalidResponseException;
import org.apache.hadoop.fs.swift.exceptions.SwiftOperationFailedException;
import org.apache.hadoop.fs.swift.http.HttpBodyContent;
import org.apache.hadoop.fs.swift.http.SwiftProtocolConstants;
import org.apache.hadoop.fs.swift.http.SwiftRestClient;
import org.apache.hadoop.fs.swift.util.DurationStats;
import org.apache.hadoop.fs.swift.util.JSONUtil;
import org.apache.hadoop.fs.swift.util.SwiftObjectPath;
import org.apache.hadoop.fs.swift.util.SwiftUtils;
import org.codehaus.jackson.map.type.CollectionType;
import java.io.ByteArrayInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.Charset;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* File system store implementation.
* Makes REST requests, parses data from responses
*/
public class SwiftNativeFileSystemStore {
private static final Pattern URI_PATTERN = Pattern.compile("\"\\S+?\"");
private static final String PATTERN = "EEE, d MMM yyyy hh:mm:ss zzz";
private static final Log LOG =
LogFactory.getLog(SwiftNativeFileSystemStore.class);
private URI uri;
private SwiftRestClient swiftRestClient;
/**
* Initialize the filesystem store -this creates the REST client binding.
*
* @param fsURI URI of the filesystem, which is used to map to the filesystem-specific
* options in the configuration file
* @param configuration configuration
* @throws IOException on any failure.
*/
public void initialize(URI fsURI, Configuration configuration) throws IOException {
this.uri = fsURI;
this.swiftRestClient = SwiftRestClient.getInstance(fsURI, configuration);
}
@Override
public String toString() {
return "SwiftNativeFileSystemStore with "
+ swiftRestClient;
}
/**
* Get the default blocksize of this (bound) filesystem
* @return the blocksize returned for all FileStatus queries,
* which is used by the MapReduce splitter.
*/
public long getBlocksize() {
return 1024L * swiftRestClient.getBlocksizeKB();
}
public long getPartsizeKB() {
return swiftRestClient.getPartSizeKB();
}
public int getBufferSizeKB() {
return swiftRestClient.getBufferSizeKB();
}
public int getThrottleDelay() {
return swiftRestClient.getThrottleDelay();
}
/**
* Upload a file/input stream of a specific length.
*
* @param path destination path in the swift filesystem
* @param inputStream input data. This is closed afterwards, always
* @param length length of the data
* @throws IOException on a problem
*/
public void uploadFile(Path path, InputStream inputStream, long length)
throws IOException {
swiftRestClient.upload(toObjectPath(path), inputStream, length);
}
/**
* Upload part of a larger file.
*
* @param path destination path
* @param partNumber item number in the path
* @param inputStream input data
* @param length length of the data
* @throws IOException on a problem
*/
public void uploadFilePart(Path path, int partNumber,
InputStream inputStream, long length)
throws IOException {
String stringPath = path.toUri().toString();
String partitionFilename = SwiftUtils.partitionFilenameFromNumber(
partNumber);
if (stringPath.endsWith("/")) {
stringPath = stringPath.concat(partitionFilename);
} else {
stringPath = stringPath.concat("/").concat(partitionFilename);
}
swiftRestClient.upload(
new SwiftObjectPath(toDirPath(path).getContainer(), stringPath),
inputStream,
length);
}
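/*
* For illustration (the exact zero-padded name comes from
* SwiftUtils.partitionFilenameFromNumber): uploading part 3 of "/data/file"
* PUTs a child object at "/data/file/<partition-filename>", e.g.
* "/data/file/000003" if the helper renders 3 as "000003".
*/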
/**
* Tell the Swift server to expect a multi-part upload by submitting
* a 0-byte file with the X-Object-Manifest header
*
* @param path path of the final file
* @throws IOException
*/
public void createManifestForPartUpload(Path path) throws IOException {
String pathString = toObjectPath(path).toString();
if (!pathString.endsWith("/")) {
pathString = pathString.concat("/");
}
if (pathString.startsWith("/")) {
pathString = pathString.substring(1);
}
swiftRestClient.upload(toObjectPath(path),
new ByteArrayInputStream(new byte[0]),
0,
new Header(SwiftProtocolConstants.X_OBJECT_MANIFEST, pathString));
}
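/*
* For illustration (hypothetical container and object names): after the
* partitions of "data/file" are uploaded under "container/data/file/",
* this method PUTs a zero-byte object at "data/file" carrying
* "X-Object-Manifest: container/data/file/", so that a GET of "data/file"
* streams the concatenated partitions.
*/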
/**
* Get the metadata of an object
*
* @param path path
* @return file metadata; never null -an empty header set from the server is treated as a missing file
* @throws IOException on a problem
* @throws FileNotFoundException if there is nothing at the end
*/
public SwiftFileStatus getObjectMetadata(Path path) throws IOException {
return getObjectMetadata(path, true);
}
/**
* Get the HTTP headers, in case you really need the low-level
* metadata
* @param path path to probe
* @param newest newest or oldest?
* @return the header list
* @throws IOException IO problem
* @throws FileNotFoundException if there is nothing at the end
*/
public Header[] getObjectHeaders(Path path, boolean newest)
throws IOException, FileNotFoundException {
SwiftObjectPath objectPath = toObjectPath(path);
return stat(objectPath, newest);
}
/**
* Get the metadata of an object
*
* @param path path
* @param newest flag to say "set the newest header", otherwise take any entry
* @return file metadata; never null -an empty header set from the server is treated as a missing file
* @throws IOException on a problem
* @throws FileNotFoundException if there is nothing at the end
*/
public SwiftFileStatus getObjectMetadata(Path path, boolean newest)
throws IOException, FileNotFoundException {
SwiftObjectPath objectPath = toObjectPath(path);
final Header[] headers = stat(objectPath, newest);
//no headers is treated as a missing file
if (headers.length == 0) {
throw new FileNotFoundException("Not Found " + path.toUri());
}
boolean isDir = false;
long length = 0;
long lastModified = 0;
for (Header header : headers) {
String headerName = header.getName();
if (headerName.equals(SwiftProtocolConstants.X_CONTAINER_OBJECT_COUNT) ||
headerName.equals(SwiftProtocolConstants.X_CONTAINER_BYTES_USED)) {
length = 0;
isDir = true;
}
if (SwiftProtocolConstants.HEADER_CONTENT_LENGTH.equals(headerName)) {
length = Long.parseLong(header.getValue());
}
if (SwiftProtocolConstants.HEADER_LAST_MODIFIED.equals(headerName)) {
final SimpleDateFormat simpleDateFormat = new SimpleDateFormat(PATTERN);
try {
lastModified = simpleDateFormat.parse(header.getValue()).getTime();
} catch (ParseException e) {
throw new SwiftException("Failed to parse " + header.toString(), e);
}
}
}
if (lastModified == 0) {
lastModified = System.currentTimeMillis();
}
Path correctSwiftPath = getCorrectSwiftPath(path);
return new SwiftFileStatus(length,
isDir,
1,
getBlocksize(),
lastModified,
correctSwiftPath);
}
private Header[] stat(SwiftObjectPath objectPath, boolean newest) throws
IOException {
Header[] headers;
if (newest) {
headers = swiftRestClient.headRequest("getObjectMetadata-newest",
objectPath, SwiftRestClient.NEWEST);
} else {
headers = swiftRestClient.headRequest("getObjectMetadata",
objectPath);
}
return headers;
}
/**
* Get the object as an input stream
*
* @param path object path
* @return the input stream -this must be closed to terminate the connection
* @throws IOException IO problems
* @throws FileNotFoundException path doesn't resolve to an object
*/
public HttpBodyContent getObject(Path path) throws IOException {
return swiftRestClient.getData(toObjectPath(path),
SwiftRestClient.NEWEST);
}
/**
* Get the input stream starting from a specific point.
*
* @param path path to object
* @param byteRangeStart starting point
* @param length no. of bytes
* @return an input stream that must be closed
* @throws IOException IO problems
*/
public HttpBodyContent getObject(Path path, long byteRangeStart, long length)
throws IOException {
return swiftRestClient.getData(
toObjectPath(path), byteRangeStart, length);
}
/**
* List a directory.
* This is O(n) for the number of objects in this path.
*
* @param path working path
* @param listDeep ask for all the data
* @param newest ask for the newest data
* @return Collection of file statuses
* @throws IOException IO problems
* @throws FileNotFoundException if the path does not exist
*/
private List<FileStatus> listDirectory(SwiftObjectPath path,
boolean listDeep,
boolean newest) throws IOException {
final byte[] bytes;
final ArrayList<FileStatus> files = new ArrayList<FileStatus>();
final Path correctSwiftPath = getCorrectSwiftPath(path);
try {
bytes = swiftRestClient.listDeepObjectsInDirectory(path, listDeep);
} catch (FileNotFoundException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("" +
"File/Directory not found " + path);
}
if (SwiftUtils.isRootDir(path)) {
return Collections.emptyList();
} else {
throw e;
}
} catch (SwiftInvalidResponseException e) {
//bad HTTP error code
if (e.getStatusCode() == HttpStatus.SC_NO_CONTENT) {
//this can come back on a root list if the container is empty
if (SwiftUtils.isRootDir(path)) {
return Collections.emptyList();
} else {
//NO_CONTENT returned on something other than the root directory;
//see if it is there, and convert to empty list or not found
//depending on whether the entry exists.
FileStatus stat = getObjectMetadata(correctSwiftPath, newest);
if (stat.isDirectory()) {
//it's an empty directory. state that
return Collections.emptyList();
} else {
//it's a file -return that as the status
files.add(stat);
return files;
}
}
} else {
//a different status code: rethrow immediately
throw e;
}
}
final CollectionType collectionType = JSONUtil.getJsonMapper().getTypeFactory().
constructCollectionType(List.class, SwiftObjectFileStatus.class);
final List<SwiftObjectFileStatus> fileStatusList = JSONUtil.toObject(
new String(bytes, Charset.forName("UTF-8")), collectionType);
//this can happen if user lists file /data/files/file
//in this case swift will return empty array
if (fileStatusList.isEmpty()) {
SwiftFileStatus objectMetadata = getObjectMetadata(correctSwiftPath,
newest);
if (objectMetadata.isFile()) {
files.add(objectMetadata);
}
return files;
}
for (SwiftObjectFileStatus status : fileStatusList) {
if (status.getName() != null) {
files.add(new SwiftFileStatus(status.getBytes(),
status.getBytes() == 0,
1,
getBlocksize(),
status.getLast_modified().getTime(),
getCorrectSwiftPath(new Path(status.getName()))));
}
}
return files;
}
/**
* List all elements in this directory
*
* @param path path to work with
* @param recursive do a recursive get
* @param newest ask for the newest, or can some out of date data work?
* @return the file statuses, or an empty array if there are no children
* @throws IOException on IO problems
* @throws FileNotFoundException if the path is nonexistent
*/
public FileStatus[] listSubPaths(Path path,
boolean recursive,
boolean newest) throws IOException {
final Collection<FileStatus> fileStatuses;
fileStatuses = listDirectory(toDirPath(path), recursive, newest);
return fileStatuses.toArray(new FileStatus[fileStatuses.size()]);
}
/**
* Create a directory
*
* @param path path
* @throws IOException
*/
public void createDirectory(Path path) throws IOException {
innerCreateDirectory(toDirPath(path));
}
/**
* The inner directory creation option. This only creates
* the dir at the given path, not any parent dirs.
* @param swiftObjectPath swift object path at which a 0-byte blob should be
* put
* @throws IOException IO problems
*/
private void innerCreateDirectory(SwiftObjectPath swiftObjectPath)
throws IOException {
swiftRestClient.putRequest(swiftObjectPath);
}
private SwiftObjectPath toDirPath(Path path) throws
SwiftConfigurationException {
return SwiftObjectPath.fromPath(uri, path, false);
}
private SwiftObjectPath toObjectPath(Path path) throws
SwiftConfigurationException {
return SwiftObjectPath.fromPath(uri, path);
}
/**
* Try to find the specific server(s) on which the data lives
* @param path path to probe
* @return a possibly empty list of locations
* @throws IOException on problems determining the locations
*/
public List<URI> getObjectLocation(Path path) throws IOException {
final byte[] objectLocation;
objectLocation = swiftRestClient.getObjectLocation(toObjectPath(path));
if (objectLocation == null || objectLocation.length == 0) {
//no object location, return an empty list
return new LinkedList<URI>();
}
return extractUris(new String(objectLocation, Charset.forName("UTF-8")), path);
}
/**
* deletes object from Swift
*
* @param path path to delete
* @return true if the path was deleted by this specific operation.
* @throws IOException on a failure
*/
public boolean deleteObject(Path path) throws IOException {
SwiftObjectPath swiftObjectPath = toObjectPath(path);
if (!SwiftUtils.isRootDir(swiftObjectPath)) {
return swiftRestClient.delete(swiftObjectPath);
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Not deleting root directory entry");
}
return true;
}
}
/**
* deletes a directory from Swift. This is not recursive
*
* @param path path to delete
* @return true if the path was deleted by this specific operation -or
* the path was root and not acted on.
* @throws IOException on a failure
*/
public boolean rmdir(Path path) throws IOException {
return deleteObject(path);
}
/**
* Does the object exist
*
* @param path object path
* @return true if the metadata of an object could be retrieved
* @throws IOException IO problems other than FileNotFound, which
* is downgraded to an object does not exist return code
*/
public boolean objectExists(Path path) throws IOException {
return objectExists(toObjectPath(path));
}
/**
* Does the object exist
*
* @param path swift object path
* @return true if the metadata of an object could be retrieved
* @throws IOException IO problems other than FileNotFound, which
* is downgraded to an object does not exist return code
*/
public boolean objectExists(SwiftObjectPath path) throws IOException {
try {
Header[] headers = swiftRestClient.headRequest("objectExists",
path,
SwiftRestClient.NEWEST);
//no headers is treated as a missing file
return headers.length != 0;
} catch (FileNotFoundException e) {
return false;
}
}
/**
* Rename through copy-and-delete. this is a consequence of the
* Swift filesystem using the path as the hash
* into the Distributed Hash Table, "the ring" of filenames.
* <p>
* Because of the nature of the operation, it is not atomic.
*
* @param src source file/dir
* @param dst destination
* @throws IOException IO failure
* @throws SwiftOperationFailedException if the rename failed
* @throws FileNotFoundException if the source directory is missing, or
* the parent directory of the destination
*/
public void rename(Path src, Path dst)
throws FileNotFoundException, SwiftOperationFailedException, IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("mv " + src + " " + dst);
}
boolean renamingOnToSelf = src.equals(dst);
SwiftObjectPath srcObject = toObjectPath(src);
SwiftObjectPath destObject = toObjectPath(dst);
if (SwiftUtils.isRootDir(srcObject)) {
throw new SwiftOperationFailedException("cannot rename root dir");
}
final SwiftFileStatus srcMetadata;
srcMetadata = getObjectMetadata(src);
SwiftFileStatus dstMetadata;
try {
dstMetadata = getObjectMetadata(dst);
} catch (FileNotFoundException e) {
//destination does not exist.
LOG.debug("Destination does not exist");
dstMetadata = null;
}
//check to see if the destination parent directory exists
Path srcParent = src.getParent();
Path dstParent = dst.getParent();
//skip the overhead of a HEAD call if the src and dest share the same
//parent dir (in which case the dest dir exists), or the destination
//directory is root, in which case it must also exist
if (dstParent != null && !dstParent.equals(srcParent)) {
try {
getObjectMetadata(dstParent);
} catch (FileNotFoundException e) {
//destination parent doesn't exist; bail out
LOG.debug("destination parent directory " + dstParent + " doesn't exist");
throw e;
}
}
boolean destExists = dstMetadata != null;
boolean destIsDir = destExists && SwiftUtils.isDirectory(dstMetadata);
//calculate the destination
SwiftObjectPath destPath;
//enum the child entries and everything underneath
List<FileStatus> childStats = listDirectory(srcObject, true, true);
boolean srcIsFile = !srcMetadata.isDir();
if (srcIsFile) {
//source is a simple file OR a partitioned file
// outcomes:
// #1 dest exists and is file: fail
// #2 dest exists and is dir: destination path becomes under dest dir
// #3 dest does not exist: use dest as name
if (destExists) {
if (destIsDir) {
//outcome #2 -move to subdir of dest
destPath = toObjectPath(new Path(dst, src.getName()));
} else {
//outcome #1: dest is a file: fail if it is a different file
if (!renamingOnToSelf) {
throw new FileAlreadyExistsException(
"cannot rename a file over one that already exists");
} else {
//is mv self self where self is a file. this becomes a no-op
LOG.debug("Renaming file onto self: no-op => success");
return;
}
}
} else {
//outcome #3 -new entry
destPath = toObjectPath(dst);
}
int childCount = childStats.size();
//here there is one of:
// - a single object ==> standard file
// - an object with child partitions ==> partitioned file
if (childCount == 0) {
copyThenDeleteObject(srcObject, destPath);
} else {
//do the copy
SwiftUtils.debug(LOG, "Source file appears to be partitioned." +
" copying file and deleting children");
copyObject(srcObject, destPath);
for (FileStatus stat : childStats) {
SwiftUtils.debug(LOG, "Deleting partitioned file %s ", stat);
deleteObject(stat.getPath());
}
swiftRestClient.delete(srcObject);
}
} else {
//here the source exists and is a directory
// outcomes (given we know the parent dir exists if we get this far)
// #1 destination is a file: fail
// #2 destination is a directory: create a new dir under that one
// #3 destination doesn't exist: create a new dir with that name
// #2 and #3 are only allowed if the dest path is not == or under src
if (destExists && !destIsDir) {
// #1 destination is a file: fail
throw new FileAlreadyExistsException(
"the source is a directory, but not the destination");
}
Path targetPath;
if (destExists) {
// #2 destination is a directory: create a new dir under that one
targetPath = new Path(dst, src.getName());
} else {
// #3 destination doesn't exist: create a new dir with that name
targetPath = dst;
}
SwiftObjectPath targetObjectPath = toObjectPath(targetPath);
//final check for any recursive operations
if (srcObject.isEqualToOrParentOf(targetObjectPath)) {
//you can't rename a directory onto itself
throw new SwiftOperationFailedException(
"cannot move a directory under itself");
}
LOG.info("mv " + srcObject + " " + targetPath);
logDirectory("Directory to copy ", srcObject, childStats);
// iterative copy of everything under the directory.
// by listing all children this can be done iteratively
// rather than recursively -everything in this list is either a file
// or a 0-byte-len file pretending to be a directory.
String srcURI = src.toUri().toString();
int prefixStripCount = srcURI.length() + 1;
for (FileStatus fileStatus : childStats) {
Path copySourcePath = fileStatus.getPath();
String copySourceURI = copySourcePath.toUri().toString();
String copyDestSubPath = copySourceURI.substring(prefixStripCount);
Path copyDestPath = new Path(targetPath, copyDestSubPath);
if (LOG.isTraceEnabled()) {
//trace to debug some low-level rename path problems; retained
//in case they ever come back.
LOG.trace("srcURI=" + srcURI
+ "; copySourceURI=" + copySourceURI
+ "; copyDestSubPath=" + copyDestSubPath
+ "; copyDestPath=" + copyDestPath);
}
SwiftObjectPath copyDestination = toObjectPath(copyDestPath);
try {
copyThenDeleteObject(toObjectPath(copySourcePath),
copyDestination);
} catch (FileNotFoundException e) {
LOG.info("Skipping rename of " + copySourcePath);
}
//add a throttle delay
throttle();
}
//now rename self. If missing, create the dest directory and warn
if (!SwiftUtils.isRootDir(srcObject)) {
try {
copyThenDeleteObject(srcObject,
targetObjectPath);
} catch (FileNotFoundException e) {
//create the destination directory
LOG.warn("Source directory deleted during rename", e);
innerCreateDirectory(destObject);
}
}
}
}
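/*
* Worked example of the outcomes above (hypothetical paths):
* rename("/a/file", "/b") where "/b" is an existing directory takes
* outcome #2 -the object is copied to "/b/file" and the source is then
* deleted. Being copy-then-delete, a mid-operation failure can leave
* both source and destination present.
*/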
/**
* Debug action to dump directory statuses to the debug log
*
* @param message explanation
* @param objectPath object path (can be null)
* @param statuses listing output
*/
private void logDirectory(String message, SwiftObjectPath objectPath,
Iterable<FileStatus> statuses) {
if (LOG.isDebugEnabled()) {
LOG.debug(message + ": listing of " + objectPath);
for (FileStatus fileStatus : statuses) {
LOG.debug(fileStatus.getPath());
}
}
}
public void copy(Path srcKey, Path dstKey) throws IOException {
SwiftObjectPath srcObject = toObjectPath(srcKey);
SwiftObjectPath destObject = toObjectPath(dstKey);
swiftRestClient.copyObject(srcObject, destObject);
}
/**
* Copy an object then, if the copy worked, delete it.
* If the copy failed, the source object is not deleted.
*
* @param srcObject source object path
* @param destObject destination object path
* @throws IOException IO problems
*/
private void copyThenDeleteObject(SwiftObjectPath srcObject,
SwiftObjectPath destObject) throws
IOException {
//do the copy
copyObject(srcObject, destObject);
//getting here means the copy worked
swiftRestClient.delete(srcObject);
}
/**
* Copy an object
* @param srcObject source object path
* @param destObject destination object path
* @throws IOException IO problems
*/
private void copyObject(SwiftObjectPath srcObject,
SwiftObjectPath destObject) throws
IOException {
if (srcObject.isEqualToOrParentOf(destObject)) {
throw new SwiftException(
"Can't copy " + srcObject + " onto " + destObject);
}
//do the copy
boolean copySucceeded = swiftRestClient.copyObject(srcObject, destObject);
if (!copySucceeded) {
throw new SwiftException("Copy of " + srcObject + " to "
+ destObject + "failed");
}
}
/**
* Take a Hadoop path and return one which uses the URI prefix and authority
* of this FS. It doesn't make a relative path absolute
* @param path path in
* @return path with a URI bound to this FS
* @throws SwiftException URI cannot be created.
*/
public Path getCorrectSwiftPath(Path path) throws
SwiftException {
try {
final URI fullUri = new URI(uri.getScheme(),
uri.getAuthority(),
path.toUri().getPath(),
null,
null);
return new Path(fullUri);
} catch (URISyntaxException e) {
throw new SwiftException("Specified path " + path + " is incorrect", e);
}
}
/**
* Builds a hadoop-Path from a swift path, inserting the URI authority
* of this FS instance
* @param path swift object path
* @return Hadoop path
* @throws SwiftException if the URI couldn't be created.
*/
private Path getCorrectSwiftPath(SwiftObjectPath path) throws
SwiftException {
try {
final URI fullUri = new URI(uri.getScheme(),
uri.getAuthority(),
path.getObject(),
null,
null);
return new Path(fullUri);
} catch (URISyntaxException e) {
throw new SwiftException("Specified path " + path + " is incorrect", e);
}
}
/**
* extracts URIs from json
* @param json json to parse
* @param path path (used in exceptions)
* @return URIs
* @throws SwiftOperationFailedException on any problem parsing the JSON
*/
public static List<URI> extractUris(String json, Path path) throws
SwiftOperationFailedException {
final Matcher matcher = URI_PATTERN.matcher(json);
final List<URI> result = new ArrayList<URI>();
while (matcher.find()) {
final String s = matcher.group();
final String uri = s.substring(1, s.length() - 1);
try {
URI createdUri = URI.create(uri);
result.add(createdUri);
} catch (IllegalArgumentException e) {
//failure to create the URI, which means this is bad JSON. Convert
//to an exception with useful text
throw new SwiftOperationFailedException(
String.format(
"could not convert \"%s\" into a URI." +
" source: %s " +
" first JSON: %s",
uri, path, json.substring(0, Math.min(256, json.length()))));
}
}
return result;
}
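/*
* For illustration (hypothetical payload): given the location response
* ["http://server1:6000/obj", "http://server2:6000/obj"]
* URI_PATTERN matches each quoted token and extractUris() returns the two
* URIs with their surrounding quotes stripped.
*/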
/**
* Insert a throttled wait if the throttle delay > 0
* @throws InterruptedIOException if interrupted during sleep
*/
public void throttle() throws InterruptedIOException {
int throttleDelay = getThrottleDelay();
if (throttleDelay > 0) {
try {
Thread.sleep(throttleDelay);
} catch (InterruptedException e) {
//convert to an IOE
throw (InterruptedIOException) new InterruptedIOException(e.toString())
.initCause(e);
}
}
}
/**
* Get the current operation statistics
* @return a snapshot of the statistics
*/
public List<DurationStats> getOperationStatistics() {
return swiftRestClient.getOperationStatistics();
}
/**
* Delete the entire tree. This is an internal one with slightly different
* behavior: if an entry is missing, a {@link FileNotFoundException} is
* raised. This lets the caller distinguish a file not found with
* other reasons for failure, so handles race conditions in recursive
* directory deletes better.
* <p>
* The problem being addressed is: caller A requests a recursive delete
* of directory /dir; caller B deletes the file /dir/file in the window
* between caller A enumerating the directory contents and requesting the
* delete of /dir/file. We want to recognise the special case
* "the target file is no longer there" and not convert that into a failure
*
* @param absolutePath the path to delete.
* @param recursive if the path is a directory and set to
* true, the directory is deleted; otherwise an exception is thrown if the
* directory is not empty. In the
* case of a file, recursive can be set to either true or false.
* @return true if the object was deleted
* @throws IOException IO problems
* @throws FileNotFoundException if a file/dir being deleted is not there -
* this includes entries below the specified path, (if the path is a dir
* and recursive is true)
*/
public boolean delete(Path absolutePath, boolean recursive) throws IOException {
Path swiftPath = getCorrectSwiftPath(absolutePath);
SwiftUtils.debug(LOG, "Deleting path '%s' recursive=%b",
absolutePath,
recursive);
boolean askForNewest = true;
SwiftFileStatus fileStatus = getObjectMetadata(swiftPath, askForNewest);
//ask for the file/dir status, but don't demand the newest, as we
//don't mind if the directory has changed
//list all entries under this directory.
//this will throw FileNotFoundException if the file isn't there
FileStatus[] statuses = listSubPaths(absolutePath, true, askForNewest);
if (statuses == null) {
//the directory went away during the non-atomic stages of the operation.
// Return false as it was not this thread doing the deletion.
SwiftUtils.debug(LOG, "Path '%s' has no status -it has 'gone away'",
absolutePath,
recursive);
return false;
}
int filecount = statuses.length;
SwiftUtils.debug(LOG, "Path '%s' %d status entries'",
absolutePath,
filecount);
if (filecount == 0) {
//it's an empty directory or a path
rmdir(absolutePath);
return true;
}
if (LOG.isDebugEnabled()) {
SwiftUtils.debug(LOG, "%s", SwiftUtils.fileStatsToString(statuses, "\n"));
}
if (filecount == 1 && swiftPath.equals(statuses[0].getPath())) {
// 1 entry => simple file and it is the target
//simple file: delete it
SwiftUtils.debug(LOG, "Deleting simple file %s", absolutePath);
deleteObject(absolutePath);
return true;
}
//>1 entry implies directory with children. Run through them,
// but first check for the recursive flag and reject it *unless it looks
// like a partitioned file (len > 0 && has children)
if (!fileStatus.isDir()) {
LOG.debug("Multiple child entries but entry has data: assume partitioned");
} else if (!recursive) {
//if there are children, unless this is a recursive operation, fail immediately
throw new SwiftOperationFailedException("Directory " + fileStatus
+ " is not empty: "
+ SwiftUtils.fileStatsToString(
statuses, "; "));
}
//delete the entries. including ourself.
for (FileStatus entryStatus : statuses) {
Path entryPath = entryStatus.getPath();
try {
boolean deleted = deleteObject(entryPath);
if (!deleted) {
SwiftUtils.debug(LOG, "Failed to delete entry '%s'; continuing",
entryPath);
}
} catch (FileNotFoundException e) {
//the path went away -race conditions.
//do not fail, as the outcome is still OK.
SwiftUtils.debug(LOG, "Path '%s' is no longer present; continuing",
entryPath);
}
throttle();
}
//now delete self
SwiftUtils.debug(LOG, "Deleting base entry %s", absolutePath);
deleteObject(absolutePath);
return true;
}
}
| 35,024 | 34.739796 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.snative;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.swift.exceptions.SwiftConfigurationException;
import org.apache.hadoop.fs.swift.exceptions.SwiftOperationFailedException;
import org.apache.hadoop.fs.swift.exceptions.SwiftUnsupportedFeatureException;
import org.apache.hadoop.fs.swift.http.SwiftProtocolConstants;
import org.apache.hadoop.fs.swift.util.DurationStats;
import org.apache.hadoop.fs.swift.util.SwiftObjectPath;
import org.apache.hadoop.fs.swift.util.SwiftUtils;
import org.apache.hadoop.util.Progressable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
/**
* Swift file system implementation. Extends Hadoop FileSystem
*/
public class SwiftNativeFileSystem extends FileSystem {
/** filesystem prefix: {@value} */
public static final String SWIFT = "swift";
private static final Log LOG =
LogFactory.getLog(SwiftNativeFileSystem.class);
/**
* path to user work directory for storing temporary files
*/
private Path workingDir;
/**
* Swift URI
*/
private URI uri;
/**
* reference to swiftFileSystemStore
*/
private SwiftNativeFileSystemStore store;
/**
* Default constructor for Hadoop
*/
public SwiftNativeFileSystem() {
// set client in initialize()
}
/**
* This constructor is used for testing purposes
*/
public SwiftNativeFileSystem(SwiftNativeFileSystemStore store) {
this.store = store;
}
/**
* This is for testing
* @return the inner store class
*/
public SwiftNativeFileSystemStore getStore() {
return store;
}
@Override
public String getScheme() {
return SWIFT;
}
/**
* default class initialization
*
* @param fsuri path to Swift
* @param conf Hadoop configuration
* @throws IOException
*/
@Override
public void initialize(URI fsuri, Configuration conf) throws IOException {
super.initialize(fsuri, conf);
setConf(conf);
if (store == null) {
store = new SwiftNativeFileSystemStore();
}
this.uri = fsuri;
String username = System.getProperty("user.name");
this.workingDir = new Path("/user", username)
.makeQualified(uri, new Path(username));
if (LOG.isDebugEnabled()) {
LOG.debug("Initializing SwiftNativeFileSystem against URI " + uri
+ " and working dir " + workingDir);
}
store.initialize(uri, conf);
LOG.debug("SwiftFileSystem initialized");
}
/**
* @return path to Swift
*/
@Override
public URI getUri() {
return uri;
}
@Override
public String toString() {
return "Swift FileSystem " + store;
}
/**
* Path to user working directory
*
* @return Hadoop path
*/
@Override
public Path getWorkingDirectory() {
return workingDir;
}
/**
* @param dir user working directory
*/
@Override
public void setWorkingDirectory(Path dir) {
workingDir = makeAbsolute(dir);
if (LOG.isDebugEnabled()) {
LOG.debug("SwiftFileSystem.setWorkingDirectory to " + dir);
}
}
/**
* Return a file status object that represents the path.
*
* @param path The path we want information from
* @return a FileStatus object
*/
@Override
public FileStatus getFileStatus(Path path) throws IOException {
Path absolutePath = makeAbsolute(path);
return store.getObjectMetadata(absolutePath);
}
/**
* The blocksize of this filesystem is set by the property
* SwiftProtocolConstants.SWIFT_BLOCKSIZE; the default is the value of
* SwiftProtocolConstants.DEFAULT_SWIFT_BLOCKSIZE.
* @return the blocksize for this FS.
*/
@Override
public long getDefaultBlockSize() {
return store.getBlocksize();
}
/**
* The blocksize for this filesystem.
* @see #getDefaultBlockSize()
* @param f path of file
* @return the blocksize for the path
*/
@Override
public long getDefaultBlockSize(Path f) {
return store.getBlocksize();
}
@Override
public long getBlockSize(Path path) throws IOException {
return store.getBlocksize();
}
@Override
public boolean isFile(Path f) throws IOException {
try {
FileStatus fileStatus = getFileStatus(f);
return !SwiftUtils.isDirectory(fileStatus);
} catch (FileNotFoundException e) {
return false; // f does not exist
}
}
@Override
public boolean isDirectory(Path f) throws IOException {
try {
FileStatus fileStatus = getFileStatus(f);
return SwiftUtils.isDirectory(fileStatus);
} catch (FileNotFoundException e) {
return false; // f does not exist
}
}
/**
* Return an array containing hostnames, offset and size of
* portions of the given file. For a nonexistent
* file or regions, null will be returned.
* <p>
* This call is most helpful with DFS, where it returns
* hostnames of machines that contain the given file.
* <p>
* The base FileSystem will simply return an element containing 'localhost'.
*/
@Override
public BlockLocation[] getFileBlockLocations(FileStatus file,
long start,
long len) throws IOException {
//argument checks
if (file == null) {
return null;
}
if (start < 0 || len < 0) {
throw new IllegalArgumentException("Negative start or len parameter" +
" to getFileBlockLocations");
}
if (file.getLen() <= start) {
return new BlockLocation[0];
}
    // Check whether the requested file in Swift is larger than 5GB. In this
    // case each block has its own location -which may be determinable
    // from the Swift client API, depending on the remote server
final FileStatus[] listOfFileBlocks = store.listSubPaths(file.getPath(),
false,
true);
List<URI> locations = new ArrayList<URI>();
if (listOfFileBlocks.length > 1) {
for (FileStatus fileStatus : listOfFileBlocks) {
if (SwiftObjectPath.fromPath(uri, fileStatus.getPath())
.equals(SwiftObjectPath.fromPath(uri, file.getPath()))) {
continue;
}
locations.addAll(store.getObjectLocation(fileStatus.getPath()));
}
} else {
locations = store.getObjectLocation(file.getPath());
}
if (locations.isEmpty()) {
LOG.debug("No locations returned for " + file.getPath());
//no locations were returned for the object
//fall back to the superclass
String[] name = {SwiftProtocolConstants.BLOCK_LOCATION};
String[] host = { "localhost" };
      String[] topology = {SwiftProtocolConstants.TOPOLOGY_PATH};
      return new BlockLocation[] {
          new BlockLocation(name, host, topology, 0, file.getLen())
};
}
final String[] names = new String[locations.size()];
final String[] hosts = new String[locations.size()];
int i = 0;
for (URI location : locations) {
hosts[i] = location.getHost();
names[i] = location.getAuthority();
i++;
}
return new BlockLocation[]{
new BlockLocation(names, hosts, 0, file.getLen())
};
}
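  /* Illustrative usage sketch (not part of the original class; "fs" and the
     path are assumed for the example):

       FileStatus st = fs.getFileStatus(new Path("/data/part-0000"));
       BlockLocation[] locs = fs.getFileBlockLocations(st, 0, st.getLen());
       // an object with no Swift-side locality data yields a single
       // "localhost" location spanning the whole file

     A null FileStatus argument returns null; negative start/len values raise
     IllegalArgumentException, matching the checks above. */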
/**
   * Create the parent directories.
   * As an optimization, the entire hierarchy of parent
   * directories is <i>not</i> polled. Instead
   * the tree is walked up from the last to the first,
   * creating directories until one that exists is found.
   *
   * This strategy means if a file is created in an existing directory,
   * one quick poll suffices.
   *
   * There is a big assumption here: that all parent directories of an existing
   * directory also exist.
* @param path path to create.
* @param permission to apply to files
* @return true if the operation was successful
* @throws IOException on a problem
*/
@Override
public boolean mkdirs(Path path, FsPermission permission) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("SwiftFileSystem.mkdirs: " + path);
}
Path directory = makeAbsolute(path);
//build a list of paths to create
List<Path> paths = new ArrayList<Path>();
while (shouldCreate(directory)) {
//this directory needs creation, add to the list
paths.add(0, directory);
//now see if the parent needs to be created
directory = directory.getParent();
}
//go through the list of directories to create
for (Path p : paths) {
if (isNotRoot(p)) {
//perform a mkdir operation without any polling of
//the far end first
forceMkdir(p);
}
}
//if an exception was not thrown, this operation is considered
//a success
return true;
}
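  /* Illustrative sketch of the walk-up strategy above (hypothetical paths,
     not part of the original class):

       // with /a already existing, this probes /a/b/c, then /a/b, then /a,
       // and issues exactly two create calls: /a/b and /a/b/c
       fs.mkdirs(new Path("/a/b/c"));

     If an intermediate entry turns out to be a file, shouldCreate() raises
     ParentNotDirectoryException instead. */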
private boolean isNotRoot(Path absolutePath) {
return !isRoot(absolutePath);
}
private boolean isRoot(Path absolutePath) {
return absolutePath.getParent() == null;
}
/**
   * Internal implementation of directory creation.
   *
   * @param path path to file
   * @return true if the directory was created; false if no creation was needed
   * @throws IOException if the specified path is a file instead of a directory
*/
private boolean mkdir(Path path) throws IOException {
Path directory = makeAbsolute(path);
boolean shouldCreate = shouldCreate(directory);
if (shouldCreate) {
forceMkdir(directory);
}
return shouldCreate;
}
/**
* Should mkdir create this directory?
* If the directory is root : false
* If the entry exists and is a directory: false
* If the entry exists and is a file: exception
* else: true
* @param directory path to query
* @return true iff the directory should be created
* @throws IOException IO problems
* @throws ParentNotDirectoryException if the path references a file
*/
private boolean shouldCreate(Path directory) throws IOException {
FileStatus fileStatus;
boolean shouldCreate;
if (isRoot(directory)) {
      //it's the base dir, bail out immediately
return false;
}
try {
//find out about the path
fileStatus = getFileStatus(directory);
if (!SwiftUtils.isDirectory(fileStatus)) {
//if it's a file, raise an error
throw new ParentNotDirectoryException(
String.format("%s: can't mkdir since it exists and is not a directory: %s",
directory, fileStatus));
} else {
//path exists, and it is a directory
if (LOG.isDebugEnabled()) {
LOG.debug("skipping mkdir(" + directory + ") as it exists already");
}
shouldCreate = false;
}
} catch (FileNotFoundException e) {
shouldCreate = true;
}
return shouldCreate;
}
/**
   * mkdir of a directory -irrespective of what was there underneath.
   * There are no checks for the directory existing or for anything
   * else being at that path; those checks are assumed to have
   * taken place already.
* @param absolutePath path to create
* @throws IOException IO problems
*/
private void forceMkdir(Path absolutePath) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Making dir '" + absolutePath + "' in Swift");
}
//file is not found: it must be created
store.createDirectory(absolutePath);
}
/**
* List the statuses of the files/directories in the given path if the path is
* a directory.
*
* @param path given path
* @return the statuses of the files/directories in the given path
* @throws IOException
*/
@Override
public FileStatus[] listStatus(Path path) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("SwiftFileSystem.listStatus for: " + path);
}
return store.listSubPaths(makeAbsolute(path), false, true);
}
/**
* This optional operation is not supported
*/
@Override
public FSDataOutputStream append(Path f, int bufferSize, Progressable progress)
throws IOException {
LOG.debug("SwiftFileSystem.append");
throw new SwiftUnsupportedFeatureException("Not supported: append()");
}
/**
* @param permission Currently ignored.
*/
@Override
public FSDataOutputStream create(Path file, FsPermission permission,
boolean overwrite, int bufferSize,
short replication, long blockSize,
Progressable progress)
throws IOException {
LOG.debug("SwiftFileSystem.create");
FileStatus fileStatus = null;
Path absolutePath = makeAbsolute(file);
try {
fileStatus = getFileStatus(absolutePath);
} catch (FileNotFoundException e) {
//the file isn't there.
}
if (fileStatus != null) {
//the path exists -action depends on whether or not it is a directory,
//and what the overwrite policy is.
//What is clear at this point is that if the entry exists, there's
//no need to bother creating any parent entries
if (fileStatus.isDirectory()) {
//here someone is trying to create a file over a directory
/* we can't throw an exception here as there is no easy way to distinguish
a file from the dir
throw new SwiftPathExistsException("Cannot create a file over a directory:"
+ file);
*/
if (LOG.isDebugEnabled()) {
LOG.debug("Overwriting either an empty file or a directory");
}
}
if (overwrite) {
//overwrite set -> delete the object.
store.delete(absolutePath, true);
} else {
throw new FileAlreadyExistsException("Path exists: " + file);
}
} else {
// destination does not exist -trigger creation of the parent
Path parent = file.getParent();
if (parent != null) {
if (!mkdirs(parent)) {
throw new SwiftOperationFailedException(
"Mkdirs failed to create " + parent);
}
}
}
SwiftNativeOutputStream out = createSwiftOutputStream(file);
return new FSDataOutputStream(out, statistics);
}
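  /* Illustrative sketch of the overwrite policy above (hypothetical paths
     and data, not part of the original class):

       FSDataOutputStream out = fs.create(new Path("/logs/day1"), true);
       out.write(data);    // buffered locally; uploaded in parts
       out.close();
       fs.create(new Path("/logs/day1"), false);
       // -> FileAlreadyExistsException, since the object now exists
  */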
/**
* Create the swift output stream
* @param path path to write to
   * @return the new output stream
* @throws IOException
*/
protected SwiftNativeOutputStream createSwiftOutputStream(Path path) throws
IOException {
long partSizeKB = getStore().getPartsizeKB();
return new SwiftNativeOutputStream(getConf(),
getStore(),
path.toUri().toString(),
partSizeKB);
}
/**
* Opens an FSDataInputStream at the indicated Path.
*
* @param path the file name to open
* @param bufferSize the size of the buffer to be used.
* @return the input stream
* @throws FileNotFoundException if the file is not found
* @throws IOException any IO problem
*/
@Override
public FSDataInputStream open(Path path, int bufferSize) throws IOException {
int bufferSizeKB = getStore().getBufferSizeKB();
long readBlockSize = bufferSizeKB * 1024L;
return open(path, bufferSize, readBlockSize);
}
/**
   * Low-level open operation which also sets the read block size
   * @param path the file name to open
   * @param bufferSize the size of the buffer to be used.
   * @param readBlockSize how big the read block/buffer size should be
* @return the input stream
* @throws FileNotFoundException if the file is not found
* @throws IOException any IO problem
*/
public FSDataInputStream open(Path path,
int bufferSize,
long readBlockSize) throws IOException {
if (readBlockSize <= 0) {
throw new SwiftConfigurationException("Bad remote buffer size");
}
Path absolutePath = makeAbsolute(path);
return new FSDataInputStream(
new StrictBufferedFSInputStream(
new SwiftNativeInputStream(store,
statistics,
absolutePath,
readBlockSize),
bufferSize));
}
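  /* Illustrative sketch (not part of the original class): the three-argument
     open() lets callers tune the HTTP range size independently of the local
     buffer, e.g.

       SwiftNativeFileSystem sfs = (SwiftNativeFileSystem) fs;
       FSDataInputStream in = sfs.open(path, 4096, 1024 * 1024L);
       // 4KB of local buffering; 1MB ranges requested from Swift
  */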
/**
* Renames Path src to Path dst. On swift this uses copy-and-delete
* and <i>is not atomic</i>.
*
* @param src path
* @param dst path
* @return true if directory renamed, false otherwise
* @throws IOException on problems
*/
@Override
public boolean rename(Path src, Path dst) throws IOException {
try {
store.rename(makeAbsolute(src), makeAbsolute(dst));
//success
return true;
} catch (SwiftOperationFailedException e) {
//downgrade to a failure
return false;
} catch (FileAlreadyExistsException e) {
//downgrade to a failure
return false;
} catch (FileNotFoundException e) {
//downgrade to a failure
return false;
}
}
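  /* Illustrative sketch (not part of the original class): because failures
     are downgraded to a false return value, callers must check the result:

       if (!fs.rename(src, dst)) {
         // source missing, destination already present, or the copy failed
       }
  */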
/**
* Delete a file or directory
*
* @param path the path to delete.
   * @param recursive if the path is a directory and recursive is set to
   * true, the directory is deleted; otherwise an exception is thrown if
   * the directory is not empty. In the case of a file, recursive may be
   * set to either true or false.
* @return true if the object was deleted
* @throws IOException IO problems
*/
@Override
public boolean delete(Path path, boolean recursive) throws IOException {
try {
return store.delete(path, recursive);
} catch (FileNotFoundException e) {
//base path was not found.
return false;
}
}
/**
* Delete a file.
* This method is abstract in Hadoop 1.x; in 2.x+ it is non-abstract
* and deprecated
*/
@Override
public boolean delete(Path f) throws IOException {
return delete(f, true);
}
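  /* Illustrative sketch (hypothetical paths, not part of the original class):

       fs.delete(new Path("/tmp/job1"), true);   // directory tree
       fs.delete(new Path("/tmp/file"), false);  // single file
       // both return false, rather than throwing, if the path is absent
  */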
/**
* Makes path absolute
*
* @param path path to file
* @return absolute path
*/
protected Path makeAbsolute(Path path) {
if (path.isAbsolute()) {
return path;
}
return new Path(workingDir, path);
}
/**
* Get the current operation statistics
* @return a snapshot of the statistics
*/
public List<DurationStats> getOperationStatistics() {
return store.getOperationStatistics();
}
/**
* Low level method to do a deep listing of all entries, not stopping
* at the next directory entry. This is to let tests be confident that
* recursive deletes really are working.
* @param path path to recurse down
* @param newest ask for the newest data, potentially slower than not.
* @return a potentially empty array of file status
* @throws IOException any problem
*/
@InterfaceAudience.Private
public FileStatus[] listRawFileStatus(Path path, boolean newest) throws IOException {
return store.listSubPaths(makeAbsolute(path), true, newest);
}
/**
* Get the number of partitions written by an output stream
* This is for testing
* @param outputStream output stream
* @return the #of partitions written by that stream
*/
@InterfaceAudience.Private
public static int getPartitionsWritten(FSDataOutputStream outputStream) {
SwiftNativeOutputStream snos = getSwiftNativeOutputStream(outputStream);
return snos.getPartitionsWritten();
}
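  /* Illustrative sketch (not part of the original class): a stream that
     wrote more than the configured partition size reports multiple parts:

       FSDataOutputStream out = fs.create(path);
       out.write(largeBuffer);   // assumed to exceed the partition size
       out.close();
       int parts = SwiftNativeFileSystem.getPartitionsWritten(out);
  */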
private static SwiftNativeOutputStream getSwiftNativeOutputStream(
FSDataOutputStream outputStream) {
OutputStream wrappedStream = outputStream.getWrappedStream();
return (SwiftNativeOutputStream) wrappedStream;
}
/**
* Get the size of partitions written by an output stream
* This is for testing
*
* @param outputStream output stream
* @return partition size in bytes
*/
@InterfaceAudience.Private
public static long getPartitionSize(FSDataOutputStream outputStream) {
SwiftNativeOutputStream snos = getSwiftNativeOutputStream(outputStream);
return snos.getFilePartSize();
}
/**
   * Get the number of bytes written to an output stream
   * This is for testing
   *
   * @param outputStream output stream
   * @return the number of bytes written
*/
@InterfaceAudience.Private
public static long getBytesWritten(FSDataOutputStream outputStream) {
SwiftNativeOutputStream snos = getSwiftNativeOutputStream(outputStream);
return snos.getBytesWritten();
}
/**
   * Get the number of bytes uploaded by an output stream
   * to the swift cluster.
   * This is for testing
   *
   * @param outputStream output stream
   * @return the number of bytes uploaded
*/
@InterfaceAudience.Private
public static long getBytesUploaded(FSDataOutputStream outputStream) {
SwiftNativeOutputStream snos = getSwiftNativeOutputStream(outputStream);
return snos.getBytesUploaded();
}
}
| 22,214 | 29.940111 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftFileStatus.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.snative;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
/**
* A subclass of {@link FileStatus} that contains the
* Swift-specific rules of when a file is considered to be a directory.
*/
public class SwiftFileStatus extends FileStatus {
public SwiftFileStatus() {
}
public SwiftFileStatus(long length,
boolean isdir,
int block_replication,
long blocksize, long modification_time, Path path) {
super(length, isdir, block_replication, blocksize, modification_time, path);
}
public SwiftFileStatus(long length,
boolean isdir,
int block_replication,
long blocksize,
long modification_time,
long access_time,
FsPermission permission,
String owner, String group, Path path) {
super(length, isdir, block_replication, blocksize, modification_time,
access_time, permission, owner, group, path);
}
//HDFS2+ only
public SwiftFileStatus(long length,
boolean isdir,
int block_replication,
long blocksize,
long modification_time,
long access_time,
FsPermission permission,
String owner, String group, Path symlink, Path path) {
super(length, isdir, block_replication, blocksize, modification_time,
access_time, permission, owner, group, symlink, path);
}
/**
* Declare that the path represents a directory, which in the
* SwiftNativeFileSystem means "is a directory or a 0 byte file"
*
   * @return true if the status is considered to be a directory
*/
@Override
public boolean isDir() {
return super.isDirectory() || getLen() == 0;
}
  /**
   * An entry is a file if it is not a directory.
   * @return the opposite value to {@link #isDir()}
   */
@Override
public boolean isFile() {
return !isDir();
}
/**
* Directory test
* @return true if the file is considered to be a directory
*/
public boolean isDirectory() {
return isDir();
}
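  /* Illustrative sketch (hypothetical values, not part of the original
     class): a zero-byte object is reported as a directory under this rule:

       new SwiftFileStatus(0, false, 1, 0, 0, path).isDirectory();   // true
       new SwiftFileStatus(12, false, 1, 0, 0, path).isDirectory();  // false
  */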
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(getClass().getSimpleName());
sb.append("{ ");
sb.append("path=").append(getPath());
sb.append("; isDirectory=").append(isDir());
sb.append("; length=").append(getLen());
sb.append("; blocksize=").append(getBlockSize());
sb.append("; modification_time=").append(getModificationTime());
sb.append("}");
return sb.toString();
}
}
| 3,777 | 33.036036 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.snative;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.exceptions.SwiftConnectionClosedException;
import org.apache.hadoop.fs.swift.exceptions.SwiftException;
import org.apache.hadoop.fs.swift.http.HttpBodyContent;
import org.apache.hadoop.fs.swift.http.HttpInputStreamWithRelease;
import org.apache.hadoop.fs.swift.util.SwiftUtils;
import java.io.EOFException;
import java.io.IOException;
/**
* The input stream from remote Swift blobs.
* The class attempts to be buffer aware, and react to a forward seek operation
* by trying to scan ahead through the current block of data to find it.
 * This accelerates some operations that do a lot of seek()/read() actions,
 * including work (such as in the MR engine) that does a seek() immediately
 * after an open().
*/
class SwiftNativeInputStream extends FSInputStream {
private static final Log LOG = LogFactory.getLog(SwiftNativeInputStream.class);
/**
   * size of the range requested from the server
*/
private final long bufferSize;
/**
* File nativeStore instance
*/
private final SwiftNativeFileSystemStore nativeStore;
/**
* Hadoop statistics. Used to get info about number of reads, writes, etc.
*/
private final FileSystem.Statistics statistics;
/**
* Data input stream
*/
private HttpInputStreamWithRelease httpStream;
/**
* File path
*/
private final Path path;
/**
* Current position
*/
private long pos = 0;
/**
* Length of the file picked up at start time
*/
private long contentLength = -1;
/**
* Why the stream is closed
*/
private String reasonClosed = "unopened";
/**
* Offset in the range requested last
*/
private long rangeOffset = 0;
public SwiftNativeInputStream(SwiftNativeFileSystemStore storeNative,
FileSystem.Statistics statistics, Path path, long bufferSize)
throws IOException {
this.nativeStore = storeNative;
this.statistics = statistics;
this.path = path;
if (bufferSize <= 0) {
throw new IllegalArgumentException("Invalid buffer size");
}
this.bufferSize = bufferSize;
//initial buffer fill
this.httpStream = storeNative.getObject(path).getInputStream();
//fillBuffer(0);
}
/**
* Move to a new position within the file relative to where the pointer is now.
* Always call from a synchronized clause
* @param offset offset
*/
private synchronized void incPos(int offset) {
pos += offset;
rangeOffset += offset;
SwiftUtils.trace(LOG, "Inc: pos=%d bufferOffset=%d", pos, rangeOffset);
}
/**
* Update the start of the buffer; always call from a sync'd clause
* @param seekPos position sought.
* @param contentLength content length provided by response (may be -1)
*/
private synchronized void updateStartOfBufferPosition(long seekPos,
long contentLength) {
//reset the seek pointer
pos = seekPos;
//and put the buffer offset to 0
rangeOffset = 0;
this.contentLength = contentLength;
SwiftUtils.trace(LOG, "Move: pos=%d; bufferOffset=%d; contentLength=%d",
pos,
rangeOffset,
contentLength);
}
@Override
public synchronized int read() throws IOException {
verifyOpen();
int result = -1;
try {
result = httpStream.read();
} catch (IOException e) {
String msg = "IOException while reading " + path
+ ": " +e + ", attempting to reopen.";
LOG.debug(msg, e);
if (reopenBuffer()) {
result = httpStream.read();
}
}
if (result != -1) {
incPos(1);
}
if (statistics != null && result != -1) {
statistics.incrementBytesRead(1);
}
return result;
}
@Override
public synchronized int read(byte[] b, int off, int len) throws IOException {
SwiftUtils.debug(LOG, "read(buffer, %d, %d)", off, len);
SwiftUtils.validateReadArgs(b, off, len);
int result = -1;
try {
verifyOpen();
result = httpStream.read(b, off, len);
} catch (IOException e) {
//other IO problems are viewed as transient and re-attempted
LOG.info("Received IOException while reading '" + path +
"', attempting to reopen: " + e);
LOG.debug("IOE on read()" + e, e);
if (reopenBuffer()) {
result = httpStream.read(b, off, len);
}
}
if (result > 0) {
incPos(result);
if (statistics != null) {
statistics.incrementBytesRead(result);
}
}
return result;
}
/**
* Re-open the buffer
* @return true iff more data could be added to the buffer
* @throws IOException if not
*/
private boolean reopenBuffer() throws IOException {
innerClose("reopening buffer to trigger refresh");
boolean success = false;
try {
fillBuffer(pos);
success = true;
} catch (EOFException eof) {
//the EOF has been reached
this.reasonClosed = "End of file";
}
return success;
}
/**
* close the stream. After this the stream is not usable -unless and until
* it is re-opened (which can happen on some of the buffer ops)
* This method is thread-safe and idempotent.
*
* @throws IOException on IO problems.
*/
@Override
public synchronized void close() throws IOException {
innerClose("closed");
}
private void innerClose(String reason) throws IOException {
try {
if (httpStream != null) {
reasonClosed = reason;
if (LOG.isDebugEnabled()) {
LOG.debug("Closing HTTP input stream : " + reason);
}
httpStream.close();
}
} finally {
httpStream = null;
}
}
/**
* Assume that the connection is not closed: throws an exception if it is
* @throws SwiftConnectionClosedException
*/
private void verifyOpen() throws SwiftConnectionClosedException {
if (httpStream == null) {
throw new SwiftConnectionClosedException(reasonClosed);
}
}
@Override
public synchronized String toString() {
return "SwiftNativeInputStream" +
" position=" + pos
+ " buffer size = " + bufferSize
+ " "
+ (httpStream != null ? httpStream.toString()
: (" no input stream: " + reasonClosed));
}
/**
* Treats any finalize() call without the input stream being closed
* as a serious problem, logging at error level
* @throws Throwable n/a
*/
@Override
protected void finalize() throws Throwable {
if (httpStream != null) {
LOG.error(
"Input stream is leaking handles by not being closed() properly: "
+ httpStream.toString());
}
}
/**
   * Read through the specified number of bytes.
   * The implementation iterates a byte at a time, which may seem inefficient
   * compared to the read(byte[]) method offered by input streams.
   * However, if you look at the code that implements that method, it comes
   * down to read() one byte at a time -only here the return value is discarded.
   *
   * <p>
   * This is a no-op if the stream is closed
   * @param bytes number of bytes to read.
   * @return the number of bytes read
   * @throws IOException IO problems
   * @throws SwiftException if a read returned -1.
*/
private int chompBytes(long bytes) throws IOException {
int count = 0;
if (httpStream != null) {
int result;
for (long i = 0; i < bytes; i++) {
result = httpStream.read();
if (result < 0) {
throw new SwiftException("Received error code while chomping input");
}
        count++;
incPos(1);
}
}
return count;
}
/**
* Seek to an offset. If the data is already in the buffer, move to it
* @param targetPos target position
* @throws IOException on any problem
*/
@Override
public synchronized void seek(long targetPos) throws IOException {
if (targetPos < 0) {
throw new EOFException(
FSExceptionMessages.NEGATIVE_SEEK);
}
//there's some special handling of near-local data
//as the seek can be omitted if it is in/adjacent
long offset = targetPos - pos;
if (LOG.isDebugEnabled()) {
LOG.debug("Seek to " + targetPos + "; current pos =" + pos
+ "; offset="+offset);
}
if (offset == 0) {
LOG.debug("seek is no-op");
return;
}
if (offset < 0) {
LOG.debug("seek is backwards");
} else if ((rangeOffset + offset < bufferSize)) {
//if the seek is in range of that requested, scan forwards
//instead of closing and re-opening a new HTTP connection
SwiftUtils.debug(LOG,
"seek is within current stream"
+ "; pos= %d ; targetPos=%d; "
+ "offset= %d ; bufferOffset=%d",
pos, targetPos, offset, rangeOffset);
try {
LOG.debug("chomping ");
chompBytes(offset);
} catch (IOException e) {
        //failures here are assumed to be recoverable by the reopen below
LOG.debug("while chomping ",e);
}
if (targetPos - pos == 0) {
LOG.trace("chomping successful");
return;
}
LOG.trace("chomping failed");
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Seek is beyond buffer size of " + bufferSize);
}
}
innerClose("seeking to " + targetPos);
fillBuffer(targetPos);
}
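  /* Illustrative sketch (not part of the original class): a short forward
     seek is served by reading through the open HTTP stream, while a long
     one reopens the connection at the target offset:

       in.seek(pos + 128);              // within bufferSize: bytes chomped
       in.seek(pos + bufferSize + 1);   // beyond range: close + fillBuffer
  */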
/**
* Fill the buffer from the target position
* If the target position == current position, the
* read still goes ahead; this is a way of handling partial read failures
* @param targetPos target position
* @throws IOException IO problems on the read
*/
private void fillBuffer(long targetPos) throws IOException {
long length = targetPos + bufferSize;
SwiftUtils.debug(LOG, "Fetching %d bytes starting at %d", length, targetPos);
HttpBodyContent blob = nativeStore.getObject(path, targetPos, length);
httpStream = blob.getInputStream();
updateStartOfBufferPosition(targetPos, blob.getContentLength());
}
@Override
public synchronized long getPos() throws IOException {
return pos;
}
/**
* This FS doesn't explicitly support multiple data sources, so
* return false here.
* @param targetPos the desired target position
* @return true if a new source of the data has been set up
* as the source of future reads
* @throws IOException IO problems
*/
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
return false;
}
}
| 11,671 | 29.554974 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/StrictBufferedFSInputStream.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.snative;
import org.apache.hadoop.fs.BufferedFSInputStream;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.swift.exceptions.SwiftConnectionClosedException;
import java.io.EOFException;
import java.io.IOException;
/**
* Add stricter compliance with the evolving FS specifications
*/
public class StrictBufferedFSInputStream extends BufferedFSInputStream {
public StrictBufferedFSInputStream(FSInputStream in,
int size) {
super(in, size);
}
@Override
public void seek(long pos) throws IOException {
if (pos < 0) {
throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
}
if (in == null) {
throw new SwiftConnectionClosedException(FSExceptionMessages.STREAM_IS_CLOSED);
}
super.seek(pos);
}
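  /* Illustrative sketch (not part of the original class): the extra checks
     turn misuse into the exceptions the FS contract expects:

       in.seek(-1);   // EOFException(FSExceptionMessages.NEGATIVE_SEEK)
       in.close();
       in.seek(0);    // SwiftConnectionClosedException(STREAM_IS_CLOSED)
  */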
}
| 1,712 | 33.26 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/exceptions/SwiftBadRequestException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.exceptions;
import org.apache.commons.httpclient.HttpMethod;
import java.net.URI;
/**
 * Thrown to indicate that data locality can't be calculated or the requested
 * path is incorrect. Data locality can't be calculated if the OpenStack
 * Swift version is too old.
*/
public class SwiftBadRequestException extends SwiftInvalidResponseException {
public SwiftBadRequestException(String message,
String operation,
URI uri,
HttpMethod method) {
super(message, operation, uri, method);
}
public SwiftBadRequestException(String message,
int statusCode,
String operation,
URI uri) {
super(message, statusCode, operation, uri);
}
@Override
public String exceptionTitle() {
return "BadRequest";
}
}
| 1,757 | 34.16 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/exceptions/SwiftJsonMarshallingException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.exceptions;
/**
 * Exception raised when the JSON/object mapping fails.
*/
public class SwiftJsonMarshallingException extends SwiftException {
public SwiftJsonMarshallingException(String message) {
super(message);
}
public SwiftJsonMarshallingException(String message, Throwable cause) {
super(message, cause);
}
}
| 1,182 | 33.794118 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/exceptions/SwiftAuthenticationFailedException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.exceptions;
import org.apache.commons.httpclient.HttpMethod;
import java.net.URI;
/**
* An exception raised when an authentication request was rejected
*/
public class SwiftAuthenticationFailedException extends SwiftInvalidResponseException {
public SwiftAuthenticationFailedException(String message,
int statusCode,
String operation,
URI uri) {
super(message, statusCode, operation, uri);
}
public SwiftAuthenticationFailedException(String message,
String operation,
URI uri,
HttpMethod method) {
super(message, operation, uri, method);
}
@Override
public String exceptionTitle() {
return "Authentication Failure";
}
}
| 1,773 | 35.204082 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/exceptions/SwiftConnectionException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.exceptions;
/**
 * Thrown to indicate that the connection was lost or could not be made
*/
public class SwiftConnectionException extends SwiftException {
public SwiftConnectionException() {
}
public SwiftConnectionException(String message) {
super(message);
}
public SwiftConnectionException(String message, Throwable cause) {
super(message, cause);
}
}
| 1,216 | 32.805556 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/exceptions/SwiftConfigurationException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.exceptions;
/**
* Exception raised to indicate there is some problem with how the Swift FS
* is configured
*/
public class SwiftConfigurationException extends SwiftException {
public SwiftConfigurationException(String message) {
super(message);
}
public SwiftConfigurationException(String message, Throwable cause) {
super(message, cause);
}
}
| 1,220 | 34.911765 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/exceptions/SwiftUnsupportedFeatureException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.exceptions;
/**
* Exception raised on an unsupported feature in the FS API -such as
* <code>append()</code>
*/
public class SwiftUnsupportedFeatureException extends SwiftException {
public SwiftUnsupportedFeatureException(String message) {
super(message);
}
}
| 1,128 | 35.419355 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/exceptions/SwiftOperationFailedException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.exceptions;
/**
* Used to relay exceptions upstream from the inner implementation
* to the public API, where it is downgraded to a log+failure.
* Making it visible internally aids testing
*/
public class SwiftOperationFailedException extends SwiftException {
public SwiftOperationFailedException(String message) {
super(message);
}
public SwiftOperationFailedException(String message, Throwable cause) {
super(message, cause);
}
}
| 1,309 | 35.388889 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/exceptions/SwiftInvalidResponseException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.exceptions;
import org.apache.commons.httpclient.HttpMethod;
import java.io.IOException;
import java.net.URI;
/**
 * Exception raised when the HTTP code is invalid. The status code,
 * method name and operation URI are all included in the exception.
*/
public class SwiftInvalidResponseException extends SwiftConnectionException {
public final int statusCode;
public final String operation;
public final URI uri;
public final String body;
public SwiftInvalidResponseException(String message,
int statusCode,
String operation,
URI uri) {
super(message);
this.statusCode = statusCode;
this.operation = operation;
this.uri = uri;
this.body = "";
}
public SwiftInvalidResponseException(String message,
String operation,
URI uri,
HttpMethod method) {
super(message);
this.statusCode = method.getStatusCode();
this.operation = operation;
this.uri = uri;
String bodyAsString;
try {
bodyAsString = method.getResponseBodyAsString();
if (bodyAsString == null) {
bodyAsString = "";
}
} catch (IOException e) {
bodyAsString = "";
}
this.body = bodyAsString;
}
public int getStatusCode() {
return statusCode;
}
public String getOperation() {
return operation;
}
public URI getUri() {
return uri;
}
public String getBody() {
return body;
}
/**
* Override point: title of an exception -this is used in the
* toString() method.
* @return the new exception title
*/
public String exceptionTitle() {
return "Invalid Response";
}
/**
* Build a description that includes the exception title, the URI,
* the message, the status code -and any body of the response
* @return the string value for display
*/
@Override
public String toString() {
StringBuilder msg = new StringBuilder();
msg.append(exceptionTitle());
msg.append(": ");
msg.append(getMessage());
msg.append(" ");
msg.append(operation);
msg.append(" ");
msg.append(uri);
msg.append(" => ");
msg.append(statusCode);
if (body != null && !body.isEmpty()) {
msg.append(" : ");
msg.append(body);
}
return msg.toString();
}
}
| 3,295 | 26.932203 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/exceptions/SwiftConnectionClosedException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.exceptions;
/**
* Exception raised when an attempt is made to use a closed stream
*/
public class SwiftConnectionClosedException extends SwiftException {
public static final String MESSAGE =
"Connection to Swift service has been closed";
public SwiftConnectionClosedException() {
super(MESSAGE);
}
public SwiftConnectionClosedException(String reason) {
super(MESSAGE + ": " + reason);
}
}
| 1,259 | 33.054054 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/exceptions/SwiftInternalStateException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.exceptions;
/**
* The internal state of the Swift client is wrong -presumably a sign
* of some bug
*/
public class SwiftInternalStateException extends SwiftException {
public SwiftInternalStateException(String message) {
super(message);
}
public SwiftInternalStateException(String message, Throwable cause) {
super(message, cause);
}
public SwiftInternalStateException(Throwable cause) {
super(cause);
}
}
| 1,292 | 32.153846 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/exceptions/SwiftException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.exceptions;
import java.io.IOException;
/**
* A Swift-specific exception -subclasses exist
* for various specific problems.
*/
public class SwiftException extends IOException {
public SwiftException() {
super();
}
public SwiftException(String message) {
super(message);
}
public SwiftException(String message, Throwable cause) {
super(message, cause);
}
public SwiftException(Throwable cause) {
super(cause);
}
}
| 1,296 | 28.477273 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/exceptions/SwiftThrottledRequestException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.exceptions;
import org.apache.commons.httpclient.HttpMethod;
import java.net.URI;
/**
 * Exception raised if a Swift endpoint returned an HTTP response indicating
* the caller is being throttled.
*/
public class SwiftThrottledRequestException extends
SwiftInvalidResponseException {
public SwiftThrottledRequestException(String message,
String operation,
URI uri,
HttpMethod method) {
super(message, operation, uri, method);
}
}
| 1,460 | 37.447368 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/RestClientBindings.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.http;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.swift.exceptions.SwiftConfigurationException;
import java.net.URI;
import java.util.Properties;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.*;
/**
* This class implements the binding logic between Hadoop configurations
* and the swift rest client.
* <p>
* The swift rest client takes a Properties instance containing
* the string values it uses to bind to a swift endpoint.
* <p>
* This class extracts the values for a specific filesystem endpoint
* and then builds an appropriate Properties file.
*/
public final class RestClientBindings {
private static final Log LOG = LogFactory.getLog(RestClientBindings.class);
  public static final String E_INVALID_NAME = "Invalid swift hostname '%s':" +
      " hostname must be in the form container.service";
/**
* Public for testing : build the full prefix for use in resolving
* configuration items
*
* @param service service to use
* @return the prefix string <i>without any trailing "."</i>
*/
public static String buildSwiftInstancePrefix(String service) {
return SWIFT_SERVICE_PREFIX + service;
}
/**
* Raise an exception for an invalid service name
*
* @param hostname hostname that was being parsed
* @return an exception to throw
*/
private static SwiftConfigurationException invalidName(String hostname) {
return new SwiftConfigurationException(
String.format(E_INVALID_NAME, hostname));
}
/**
* Get the container name from the hostname -the single element before the
* first "." in the hostname
*
* @param hostname hostname to split
* @return the container
* @throws SwiftConfigurationException
*/
public static String extractContainerName(String hostname) throws
SwiftConfigurationException {
int i = hostname.indexOf(".");
if (i <= 0) {
throw invalidName(hostname);
}
return hostname.substring(0, i);
}
public static String extractContainerName(URI uri) throws
SwiftConfigurationException {
return extractContainerName(uri.getHost());
}
/**
* Get the service name from a longer hostname string
*
* @param hostname hostname
* @return the separated out service name
* @throws SwiftConfigurationException if the hostname was invalid
*/
public static String extractServiceName(String hostname) throws
SwiftConfigurationException {
int i = hostname.indexOf(".");
if (i <= 0) {
throw invalidName(hostname);
}
String service = hostname.substring(i + 1);
if (service.isEmpty() || service.contains(".")) {
      //an empty service, or one containing dots, is not currently supported
throw invalidName(hostname);
}
return service;
}
public static String extractServiceName(URI uri) throws
SwiftConfigurationException {
return extractServiceName(uri.getHost());
}
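  /* Illustrative sketch (hypothetical names, not part of the original
     class): a swift URI host of the form container.service splits as

       extractContainerName("data.rackspace");  // -> "data"
       extractServiceName("data.rackspace");    // -> "rackspace"
       extractServiceName("data");              // SwiftConfigurationException
       extractServiceName("a.b.c");             // SwiftConfigurationException
  */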
/**
* Build a properties instance bound to the configuration file -using
* the filesystem URI as the source of the information.
*
* @param fsURI filesystem URI
* @param conf configuration
* @return a properties file with the instance-specific properties extracted
* and bound to the swift client properties.
* @throws SwiftConfigurationException if the configuration is invalid
*/
public static Properties bind(URI fsURI, Configuration conf) throws
SwiftConfigurationException {
String host = fsURI.getHost();
if (host == null || host.isEmpty()) {
//expect shortnames -> conf names
throw invalidName(host);
}
String container = extractContainerName(host);
String service = extractServiceName(host);
//build filename schema
String prefix = buildSwiftInstancePrefix(service);
if (LOG.isDebugEnabled()) {
LOG.debug("Filesystem " + fsURI
+ " is using configuration keys " + prefix);
}
Properties props = new Properties();
props.setProperty(SWIFT_SERVICE_PROPERTY, service);
props.setProperty(SWIFT_CONTAINER_PROPERTY, container);
copy(conf, prefix + DOT_AUTH_URL, props, SWIFT_AUTH_PROPERTY, true);
copy(conf, prefix + DOT_USERNAME, props, SWIFT_USERNAME_PROPERTY, true);
copy(conf, prefix + DOT_APIKEY, props, SWIFT_APIKEY_PROPERTY, false);
    copy(conf, prefix + DOT_PASSWORD, props, SWIFT_PASSWORD_PROPERTY,
        props.contains(SWIFT_APIKEY_PROPERTY));
copy(conf, prefix + DOT_TENANT, props, SWIFT_TENANT_PROPERTY, false);
copy(conf, prefix + DOT_REGION, props, SWIFT_REGION_PROPERTY, false);
copy(conf, prefix + DOT_HTTP_PORT, props, SWIFT_HTTP_PORT_PROPERTY, false);
copy(conf, prefix +
DOT_HTTPS_PORT, props, SWIFT_HTTPS_PORT_PROPERTY, false);
copyBool(conf, prefix + DOT_PUBLIC, props, SWIFT_PUBLIC_PROPERTY, false);
copyBool(conf, prefix + DOT_LOCATION_AWARE, props,
SWIFT_LOCATION_AWARE_PROPERTY, false);
return props;
}
/**
* Extract a boolean value from the configuration and copy it to the
* properties instance.
* @param conf source configuration
* @param confKey key in the configuration file
* @param props destination property set
* @param propsKey key in the property set
* @param defVal default value
*/
private static void copyBool(Configuration conf,
String confKey,
Properties props,
String propsKey,
boolean defVal) {
boolean b = conf.getBoolean(confKey, defVal);
props.setProperty(propsKey, Boolean.toString(b));
}
private static void set(Properties props, String key, String optVal) {
if (optVal != null) {
props.setProperty(key, optVal);
}
}
/**
* Copy a (trimmed) property from the configuration file to the properties file.
* <p>
* If marked as required and not found in the configuration, an
* exception is raised.
* If not required -and missing- then the property will not be set.
* In this case, if the property is already in the Properties instance,
* it will remain untouched.
*
* @param conf source configuration
* @param confKey key in the configuration file
* @param props destination property set
* @param propsKey key in the property set
* @param required is the property required
* @throws SwiftConfigurationException if the property is required but was
* not found in the configuration instance.
*/
public static void copy(Configuration conf, String confKey, Properties props,
String propsKey,
boolean required) throws SwiftConfigurationException {
//TODO: replace. version compatibility issue conf.getTrimmed fails with NoSuchMethodError
String val = conf.get(confKey);
if (val != null) {
val = val.trim();
}
if (required && val == null) {
throw new SwiftConfigurationException(
"Missing mandatory configuration option: "
+
confKey);
}
set(props, propsKey, val);
}
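  /* Illustrative sketch (the key names below are assumptions based on the
     prefix-building above; not part of the original class):

       Properties props = new Properties();
       // required: an absent key raises SwiftConfigurationException
       copy(conf, "fs.swift.service.rackspace.auth.url",
            props, SWIFT_AUTH_PROPERTY, true);
       // optional: an absent key leaves props untouched
       copy(conf, "fs.swift.service.rackspace.region",
            props, SWIFT_REGION_PROPERTY, false);
  */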
}
| 8,163 | 35.284444 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/HttpInputStreamWithRelease.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.http;
import org.apache.commons.httpclient.HttpMethod;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.swift.exceptions.SwiftConnectionClosedException;
import org.apache.hadoop.fs.swift.util.SwiftUtils;
import java.io.ByteArrayInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
/**
* This replaces the input stream release class from JetS3t and AWS;
* # Failures in the constructor are relayed up instead of simply logged.
* # it is set up to be more robust at teardown
* # release logic is thread safe
 * Note that the inner stream itself makes no thread
 * safety guarantees -this stream is not to be read across threads.
* The thread safety logic here is to ensure that even if somebody ignores
* that rule, the release code does not get entered twice -and that
* any release in one thread is picked up by read operations in all others.
*/
public class HttpInputStreamWithRelease extends InputStream {
private static final Log LOG =
LogFactory.getLog(HttpInputStreamWithRelease.class);
private final URI uri;
private HttpMethod method;
//flag to say the stream is released -volatile so that read operations
//pick it up even while unsynchronized.
private volatile boolean released;
//volatile flag to verify that data is consumed.
private volatile boolean dataConsumed;
private InputStream inStream;
/**
* In debug builds, this is filled in with the construction-time
* stack, which is then included in logs from the finalize(), method.
*/
private final Exception constructionStack;
/**
* Why the stream is closed
*/
private String reasonClosed = "unopened";
public HttpInputStreamWithRelease(URI uri, HttpMethod method) throws
IOException {
this.uri = uri;
this.method = method;
constructionStack = LOG.isDebugEnabled() ? new Exception("stack") : null;
if (method == null) {
throw new IllegalArgumentException("Null 'method' parameter ");
}
try {
inStream = method.getResponseBodyAsStream();
} catch (IOException e) {
inStream = new ByteArrayInputStream(new byte[]{});
throw releaseAndRethrow("getResponseBodyAsStream() in constructor -" + e, e);
}
}
@Override
public void close() throws IOException {
release("close()", null);
}
/**
* Release logic
* @param reason reason for release (used in debug messages)
* @param ex exception that is a cause -null for non-exceptional releases
* @return true if the release took place here
* @throws IOException if the abort or close operations failed.
*/
private synchronized boolean release(String reason, Exception ex) throws
IOException {
if (!released) {
reasonClosed = reason;
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Releasing connection to " + uri + ": " + reason, ex);
}
if (method != null) {
if (!dataConsumed) {
method.abort();
}
method.releaseConnection();
}
if (inStream != null) {
//this guard may seem un-needed, but a stack trace seen
//on the JetS3t predecessor implied that it
//is useful
inStream.close();
}
return true;
} finally {
        //if something went wrong here, we do not want a later release()
        //call to attempt the teardown again.
released = true;
dataConsumed = true;
}
} else {
return false;
}
}
/**
* Release the method, using the exception as a cause
* @param operation operation that failed
* @param ex the exception which triggered it.
* @return the exception to throw
*/
private IOException releaseAndRethrow(String operation, IOException ex) {
try {
release(operation, ex);
} catch (IOException ioe) {
LOG.debug("Exception during release: " + operation + " - " + ioe, ioe);
//make this the exception if there was none before
if (ex == null) {
ex = ioe;
}
}
return ex;
}
/**
   * Check that the stream has not been released: throw an exception if it has
* @throws SwiftConnectionClosedException
*/
private synchronized void assumeNotReleased() throws SwiftConnectionClosedException {
if (released || inStream == null) {
throw new SwiftConnectionClosedException(reasonClosed);
}
}
@Override
public int available() throws IOException {
assumeNotReleased();
try {
return inStream.available();
} catch (IOException e) {
throw releaseAndRethrow("available() failed -" + e, e);
}
}
@Override
public int read() throws IOException {
assumeNotReleased();
int read = 0;
try {
read = inStream.read();
} catch (EOFException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("EOF exception " + e, e);
}
read = -1;
} catch (IOException e) {
throw releaseAndRethrow("read()", e);
}
if (read < 0) {
dataConsumed = true;
release("read() -all data consumed", null);
}
return read;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
SwiftUtils.validateReadArgs(b, off, len);
//if the stream is already closed, then report an exception.
assumeNotReleased();
//now read in a buffer, reacting differently to different operations
int read;
try {
read = inStream.read(b, off, len);
} catch (EOFException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("EOF exception " + e, e);
}
read = -1;
} catch (IOException e) {
throw releaseAndRethrow("read(b, off, " + len + ")", e);
}
if (read < 0) {
dataConsumed = true;
release("read() -all data consumed", null);
}
return read;
}
/**
   * The finalizer releases the stream; if the release actually happened
   * here, it also logs at WARN level, including the URI at fault.
*/
@Override
protected void finalize() {
try {
if (release("finalize()", constructionStack)) {
LOG.warn("input stream of " + uri
+ " not closed properly -cleaned up in finalize()");
}
} catch (Exception e) {
//swallow anything that failed here
LOG.warn("Exception while releasing " + uri + "in finalizer",
e);
}
}
@Override
public String toString() {
return "HttpInputStreamWithRelease working with " + uri
+" released=" + released
+" dataConsumed=" + dataConsumed;
}
}
| 7,581 | 31.127119 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/SwiftRestClient.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.http;
import org.apache.commons.httpclient.DefaultHttpMethodRetryHandler;
import org.apache.commons.httpclient.Header;
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.HttpHost;
import org.apache.commons.httpclient.HttpMethod;
import org.apache.commons.httpclient.HttpMethodBase;
import org.apache.commons.httpclient.HttpStatus;
import org.apache.commons.httpclient.methods.DeleteMethod;
import org.apache.commons.httpclient.methods.GetMethod;
import org.apache.commons.httpclient.methods.HeadMethod;
import org.apache.commons.httpclient.methods.InputStreamRequestEntity;
import org.apache.commons.httpclient.methods.PostMethod;
import org.apache.commons.httpclient.methods.PutMethod;
import org.apache.commons.httpclient.methods.StringRequestEntity;
import org.apache.commons.httpclient.params.HttpConnectionParams;
import org.apache.commons.httpclient.params.HttpMethodParams;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.swift.auth.ApiKeyAuthenticationRequest;
import org.apache.hadoop.fs.swift.auth.ApiKeyCredentials;
import org.apache.hadoop.fs.swift.auth.AuthenticationRequest;
import org.apache.hadoop.fs.swift.auth.AuthenticationRequestWrapper;
import org.apache.hadoop.fs.swift.auth.AuthenticationResponse;
import org.apache.hadoop.fs.swift.auth.AuthenticationWrapper;
import org.apache.hadoop.fs.swift.auth.KeyStoneAuthRequest;
import org.apache.hadoop.fs.swift.auth.KeystoneApiKeyCredentials;
import org.apache.hadoop.fs.swift.auth.PasswordAuthenticationRequest;
import org.apache.hadoop.fs.swift.auth.PasswordCredentials;
import org.apache.hadoop.fs.swift.auth.entities.AccessToken;
import org.apache.hadoop.fs.swift.auth.entities.Catalog;
import org.apache.hadoop.fs.swift.auth.entities.Endpoint;
import org.apache.hadoop.fs.swift.exceptions.SwiftAuthenticationFailedException;
import org.apache.hadoop.fs.swift.exceptions.SwiftBadRequestException;
import org.apache.hadoop.fs.swift.exceptions.SwiftConfigurationException;
import org.apache.hadoop.fs.swift.exceptions.SwiftException;
import org.apache.hadoop.fs.swift.exceptions.SwiftInternalStateException;
import org.apache.hadoop.fs.swift.exceptions.SwiftInvalidResponseException;
import org.apache.hadoop.fs.swift.exceptions.SwiftThrottledRequestException;
import org.apache.hadoop.fs.swift.util.Duration;
import org.apache.hadoop.fs.swift.util.DurationStats;
import org.apache.hadoop.fs.swift.util.DurationStatsTable;
import org.apache.hadoop.fs.swift.util.JSONUtil;
import org.apache.hadoop.fs.swift.util.SwiftObjectPath;
import org.apache.hadoop.fs.swift.util.SwiftUtils;
import org.apache.http.conn.params.ConnRoutePNames;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLEncoder;
import java.util.List;
import java.util.Properties;
import static org.apache.commons.httpclient.HttpStatus.*;
import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.*;
/**
* This implements the client-side of the Swift REST API
*
* The core actions put, get and query data in the Swift object store,
 * after authenticating the client.
*
* <b>Logging:</b>
*
* Logging at DEBUG level displays detail about the actions of this
* client, including HTTP requests and responses -excluding authentication
* details.
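 *
 * A minimal usage sketch; the URI, container and path values are
 * illustrative:
 * <pre>{@code
 * Configuration conf = new Configuration();
 * URI fsURI = new URI("swift://container.example/");
 * SwiftRestClient client = SwiftRestClient.getInstance(fsURI, conf);
 * //authentication happens implicitly on the first remote operation
 * long len = client.getContentLength(
 *     new SwiftObjectPath("container", "/dir/file"));
 * }</pre>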
*/
public final class SwiftRestClient {
private static final Log LOG = LogFactory.getLog(SwiftRestClient.class);
/**
* Header that says "use newest version" -ensures that
* the query doesn't pick up older versions served by
* an eventually consistent filesystem (except in the special case
* of a network partition, at which point no guarantees about
   * consistency can be made).
*/
public static final Header NEWEST =
new Header(SwiftProtocolConstants.X_NEWEST, "true");
/**
* the authentication endpoint as supplied in the configuration
*/
private final URI authUri;
/**
   * Swift region. Some OpenStack installations have more than one region.
   * In this case the user can specify the region with which Hadoop will work.
*/
private final String region;
/**
* tenant name
*/
private final String tenant;
/**
   * user name
*/
private final String username;
/**
* user password
*/
private final String password;
/**
* user api key
*/
private final String apiKey;
/**
* The authentication request used to authenticate with Swift
*/
private final AuthenticationRequest authRequest;
/**
   * This auth request is similar to {@link #authRequest},
   * with one difference: it has an alternative JSON representation,
   * used when the {@link #authRequest} form is not applicable.
*/
private AuthenticationRequest keystoneAuthRequest;
private boolean useKeystoneAuthentication = false;
/**
* The container this client is working with
*/
private final String container;
private final String serviceDescription;
/**
* Access token (Secret)
*/
private AccessToken token;
/**
* Endpoint for swift operations, obtained after authentication
*/
private URI endpointURI;
/**
* URI under which objects can be found.
* This is set when the user is authenticated -the URI
* is returned in the body of the success response.
*/
private URI objectLocationURI;
private final URI filesystemURI;
/**
* The name of the service provider
*/
private final String serviceProvider;
/**
* Should the public swift endpoint be used, rather than the in-cluster one?
*/
private final boolean usePublicURL;
/**
* Number of times to retry a connection
*/
private final int retryCount;
/**
* How long (in milliseconds) should a connection be attempted
*/
private final int connectTimeout;
/**
   * How long (in milliseconds) to wait for data on an established connection
*/
private final int socketTimeout;
/**
* How long (in milliseconds) between bulk operations
*/
private final int throttleDelay;
/**
* the name of a proxy host (can be null, in which case there is no proxy)
*/
private String proxyHost;
/**
* The port of a proxy. This is ignored if {@link #proxyHost} is null
*/
private int proxyPort;
/**
* Flag to indicate whether or not the client should
* query for file location data.
*/
private final boolean locationAware;
private final int partSizeKB;
/**
* The blocksize of this FS
*/
private final int blocksizeKB;
private final int bufferSizeKB;
private final DurationStatsTable durationStats = new DurationStatsTable();
/**
* objects query endpoint. This is synchronized
* to handle a simultaneous update of all auth data in one
* go.
*/
private synchronized URI getEndpointURI() {
return endpointURI;
}
/**
* object location endpoint
*/
private synchronized URI getObjectLocationURI() {
return objectLocationURI;
}
/**
* token for Swift communication
*/
private synchronized AccessToken getToken() {
return token;
}
/**
* Setter of authentication and endpoint details.
* Being synchronized guarantees that all three fields are set up together.
* It is up to the reader to read all three fields in their own
* synchronized block to be sure that they are all consistent.
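   * A reader honouring that contract would look like this sketch
   * (the local variable names are illustrative):
   * <pre>{@code
   * synchronized (this) {
   *   uri = endpointURI;
   *   location = objectLocationURI;
   *   accessToken = token;
   * }
   * }</pre>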
*
* @param endpoint endpoint URI
* @param objectLocation object location URI
* @param authToken auth token
*/
private void setAuthDetails(URI endpoint,
URI objectLocation,
AccessToken authToken) {
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("setAuth: endpoint=%s; objectURI=%s; token=%s",
endpoint, objectLocation, authToken));
}
synchronized (this) {
endpointURI = endpoint;
objectLocationURI = objectLocation;
token = authToken;
}
}
/**
* Base class for all Swift REST operations
*
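   * Subclasses fill in the template: {@link #doCreateMethod(String)}
   * creates the HTTP method, {@link #setup} configures it, and
   * {@link #extractResult} turns the response into a result. A sketch of
   * a one-off processor, mirroring the content-length probe used later
   * in this file (the surrounding {@code perform()} call is elided):
   * <pre>{@code
   * new HeadMethodProcessor<Long>() {
   *   public Long extractResult(HeadMethod method) throws IOException {
   *     return method.getResponseContentLength();
   *   }
   * }
   * }</pre>
   *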
* @param <M> method
* @param <R> result
*/
private static abstract class HttpMethodProcessor<M extends HttpMethod, R> {
public final M createMethod(String uri) throws IOException {
final M method = doCreateMethod(uri);
setup(method);
return method;
}
/**
* Override it to return some result after method is executed.
*/
public abstract R extractResult(M method) throws IOException;
/**
* Factory method to create a REST method against the given URI
*
* @param uri target
* @return method to invoke
*/
protected abstract M doCreateMethod(String uri);
/**
     * Override point: set up the method before it is executed.
*/
protected void setup(M method) throws IOException {
}
/**
* Override point: what are the status codes that this operation supports
*
* @return an array with the permitted status code(s)
*/
protected int[] getAllowedStatusCodes() {
return new int[]{
SC_OK,
SC_CREATED,
SC_ACCEPTED,
SC_NO_CONTENT,
SC_PARTIAL_CONTENT,
};
}
}
private static abstract class GetMethodProcessor<R> extends HttpMethodProcessor<GetMethod, R> {
@Override
protected final GetMethod doCreateMethod(String uri) {
return new GetMethod(uri);
}
}
private static abstract class PostMethodProcessor<R> extends HttpMethodProcessor<PostMethod, R> {
@Override
protected final PostMethod doCreateMethod(String uri) {
return new PostMethod(uri);
}
}
/**
* There's a special type for auth messages, so that low-level
* message handlers can react to auth failures differently from everything
* else.
*/
private static class AuthPostMethod extends PostMethod {
private AuthPostMethod(String uri) {
super(uri);
}
}
/**
* Generate an auth message
* @param <R> response
*/
private static abstract class AuthMethodProcessor<R> extends
HttpMethodProcessor<AuthPostMethod, R> {
@Override
protected final AuthPostMethod doCreateMethod(String uri) {
return new AuthPostMethod(uri);
}
}
private static abstract class PutMethodProcessor<R> extends HttpMethodProcessor<PutMethod, R> {
@Override
protected final PutMethod doCreateMethod(String uri) {
return new PutMethod(uri);
}
/**
* Override point: what are the status codes that this operation supports
*
* @return the list of status codes to accept
*/
@Override
protected int[] getAllowedStatusCodes() {
return new int[]{
SC_OK,
SC_CREATED,
SC_NO_CONTENT,
SC_ACCEPTED,
};
}
}
/**
   * Copy operation
   *
   * @param <R> result type
*/
private static abstract class CopyMethodProcessor<R> extends HttpMethodProcessor<CopyMethod, R> {
@Override
protected final CopyMethod doCreateMethod(String uri) {
return new CopyMethod(uri);
}
/**
* The only allowed status code is 201:created
* @return an array with the permitted status code(s)
*/
@Override
protected int[] getAllowedStatusCodes() {
return new int[]{
SC_CREATED
};
}
}
/**
* Delete operation
*
   * @param <R> result type
*/
private static abstract class DeleteMethodProcessor<R> extends HttpMethodProcessor<DeleteMethod, R> {
@Override
protected final DeleteMethod doCreateMethod(String uri) {
return new DeleteMethod(uri);
}
@Override
protected int[] getAllowedStatusCodes() {
return new int[]{
SC_OK,
SC_ACCEPTED,
SC_NO_CONTENT,
SC_NOT_FOUND
};
}
}
private static abstract class HeadMethodProcessor<R> extends HttpMethodProcessor<HeadMethod, R> {
@Override
protected final HeadMethod doCreateMethod(String uri) {
return new HeadMethod(uri);
}
}
/**
* Create a Swift Rest Client instance.
*
* @param filesystemURI filesystem URI
* @param conf The configuration to use to extract the binding
* @throws SwiftConfigurationException the configuration is not valid for
* defining a rest client against the service
*/
private SwiftRestClient(URI filesystemURI,
Configuration conf)
throws SwiftConfigurationException {
this.filesystemURI = filesystemURI;
Properties props = RestClientBindings.bind(filesystemURI, conf);
String stringAuthUri = getOption(props, SWIFT_AUTH_PROPERTY);
username = getOption(props, SWIFT_USERNAME_PROPERTY);
password = props.getProperty(SWIFT_PASSWORD_PROPERTY);
apiKey = props.getProperty(SWIFT_APIKEY_PROPERTY);
//optional
region = props.getProperty(SWIFT_REGION_PROPERTY);
//tenant is optional
tenant = props.getProperty(SWIFT_TENANT_PROPERTY);
//service is used for diagnostics
serviceProvider = props.getProperty(SWIFT_SERVICE_PROPERTY);
container = props.getProperty(SWIFT_CONTAINER_PROPERTY);
String isPubProp = props.getProperty(SWIFT_PUBLIC_PROPERTY, "false");
usePublicURL = "true".equals(isPubProp);
if (apiKey == null && password == null) {
throw new SwiftConfigurationException(
"Configuration for " + filesystemURI +" must contain either "
+ SWIFT_PASSWORD_PROPERTY + " or "
+ SWIFT_APIKEY_PROPERTY);
}
//create the (reusable) authentication request
if (password != null) {
authRequest = new PasswordAuthenticationRequest(tenant,
new PasswordCredentials(
username,
password));
} else {
authRequest = new ApiKeyAuthenticationRequest(tenant,
new ApiKeyCredentials(
username, apiKey));
keystoneAuthRequest = new KeyStoneAuthRequest(tenant,
new KeystoneApiKeyCredentials(username, apiKey));
}
locationAware = "true".equals(
props.getProperty(SWIFT_LOCATION_AWARE_PROPERTY, "false"));
//now read in properties that are shared across all connections
//connection and retries
try {
retryCount = conf.getInt(SWIFT_RETRY_COUNT, DEFAULT_RETRY_COUNT);
connectTimeout = conf.getInt(SWIFT_CONNECTION_TIMEOUT,
DEFAULT_CONNECT_TIMEOUT);
socketTimeout = conf.getInt(SWIFT_SOCKET_TIMEOUT,
DEFAULT_SOCKET_TIMEOUT);
throttleDelay = conf.getInt(SWIFT_THROTTLE_DELAY,
DEFAULT_THROTTLE_DELAY);
//proxy options
proxyHost = conf.get(SWIFT_PROXY_HOST_PROPERTY);
proxyPort = conf.getInt(SWIFT_PROXY_PORT_PROPERTY, 8080);
blocksizeKB = conf.getInt(SWIFT_BLOCKSIZE,
DEFAULT_SWIFT_BLOCKSIZE);
if (blocksizeKB <= 0) {
throw new SwiftConfigurationException("Invalid blocksize set in "
+ SWIFT_BLOCKSIZE
+ ": " + blocksizeKB);
}
partSizeKB = conf.getInt(SWIFT_PARTITION_SIZE,
DEFAULT_SWIFT_PARTITION_SIZE);
if (partSizeKB <=0) {
throw new SwiftConfigurationException("Invalid partition size set in "
+ SWIFT_PARTITION_SIZE
+ ": " + partSizeKB);
}
bufferSizeKB = conf.getInt(SWIFT_REQUEST_SIZE,
DEFAULT_SWIFT_REQUEST_SIZE);
if (bufferSizeKB <=0) {
throw new SwiftConfigurationException("Invalid buffer size set in "
+ SWIFT_REQUEST_SIZE
+ ": " + bufferSizeKB);
}
} catch (NumberFormatException e) {
//convert exceptions raised parsing ints and longs into
// SwiftConfigurationException instances
throw new SwiftConfigurationException(e.toString(), e);
}
//everything you need for diagnostics. The password is omitted.
serviceDescription = String.format(
"Service={%s} container={%s} uri={%s}"
+ " tenant={%s} user={%s} region={%s}"
+ " publicURL={%b}"
+ " location aware={%b}"
+ " partition size={%d KB}, buffer size={%d KB}"
+ " block size={%d KB}"
+ " connect timeout={%d}, retry count={%d}"
+ " socket timeout={%d}"
+ " throttle delay={%d}"
,
serviceProvider,
container,
stringAuthUri,
tenant,
username,
region != null ? region : "(none)",
usePublicURL,
locationAware,
partSizeKB,
bufferSizeKB,
blocksizeKB,
connectTimeout,
retryCount,
socketTimeout,
throttleDelay
);
if (LOG.isDebugEnabled()) {
LOG.debug(serviceDescription);
}
try {
this.authUri = new URI(stringAuthUri);
} catch (URISyntaxException e) {
throw new SwiftConfigurationException("The " + SWIFT_AUTH_PROPERTY
+ " property was incorrect: "
+ stringAuthUri, e);
}
}
/**
* Get a mandatory configuration option
*
* @param props property set
* @param key key
* @return value of the configuration
* @throws SwiftConfigurationException if there was no match for the key
*/
private static String getOption(Properties props, String key) throws
SwiftConfigurationException {
String val = props.getProperty(key);
if (val == null) {
throw new SwiftConfigurationException("Undefined property: " + key);
}
return val;
}
/**
* Make an HTTP GET request to Swift to get a range of data in the object.
*
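   * For example, {@code getData(path, 100, 50)} asks for the header
   * {@code Range: bytes=100-149} -fifty bytes starting at offset 100.
   *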
* @param path path to object
* @param offset offset from file beginning
   * @param length number of bytes to read
* @return The input stream -which must be closed afterwards.
* @throws IOException Problems
* @throws SwiftException swift specific error
* @throws FileNotFoundException path is not there
*/
public HttpBodyContent getData(SwiftObjectPath path,
long offset,
long length) throws IOException {
if (offset < 0) {
throw new SwiftException("Invalid offset: " + offset
+ " in getDataAsInputStream( path=" + path
+ ", offset=" + offset
+ ", length =" + length + ")");
}
if (length <= 0) {
throw new SwiftException("Invalid length: " + length
+ " in getDataAsInputStream( path="+ path
+ ", offset=" + offset
+ ", length ="+ length + ")");
}
final String range = String.format(SWIFT_RANGE_HEADER_FORMAT_PATTERN,
offset,
offset + length - 1);
if (LOG.isDebugEnabled()) {
LOG.debug("getData:" + range);
}
return getData(path,
new Header(HEADER_RANGE, range),
SwiftRestClient.NEWEST);
}
/**
* Returns object length
*
* @param uri file URI
* @return object length
* @throws SwiftException on swift-related issues
* @throws IOException on network/IO problems
*/
public long getContentLength(URI uri) throws IOException {
preRemoteCommand("getContentLength");
return perform("getContentLength", uri, new HeadMethodProcessor<Long>() {
@Override
public Long extractResult(HeadMethod method) throws IOException {
return method.getResponseContentLength();
}
@Override
protected void setup(HeadMethod method) throws IOException {
super.setup(method);
method.addRequestHeader(NEWEST);
}
});
}
/**
* Get the length of the remote object
* @param path object to probe
* @return the content length
* @throws IOException on any failure
*/
public long getContentLength(SwiftObjectPath path) throws IOException {
return getContentLength(pathToURI(path));
}
/**
* Get the path contents as an input stream.
* <b>Warning:</b> this input stream must be closed to avoid
* keeping Http connections open.
*
* @param path path to file
* @param requestHeaders http headers
* @return byte[] file data or null if the object was not found
* @throws IOException on IO Faults
* @throws FileNotFoundException if there is nothing at the path
*/
public HttpBodyContent getData(SwiftObjectPath path,
final Header... requestHeaders)
throws IOException {
preRemoteCommand("getData");
return doGet(pathToURI(path),
requestHeaders);
}
/**
* Returns object location as byte[]
*
* @param path path to file
* @param requestHeaders http headers
* @return byte[] file data or null if the object was not found
* @throws IOException on IO Faults
*/
public byte[] getObjectLocation(SwiftObjectPath path,
final Header... requestHeaders) throws IOException {
if (!isLocationAware()) {
//if the filesystem is not location aware, do not ask for this information
return null;
}
preRemoteCommand("getObjectLocation");
try {
return perform("getObjectLocation", pathToObjectLocation(path),
new GetMethodProcessor<byte[]>() {
@Override
protected int[] getAllowedStatusCodes() {
return new int[]{
SC_OK,
SC_FORBIDDEN,
SC_NO_CONTENT
};
}
@Override
public byte[] extractResult(GetMethod method) throws
IOException {
//TODO: remove SC_NO_CONTENT if it depends on Swift versions
if (method.getStatusCode() == SC_NOT_FOUND
|| method.getStatusCode() == SC_FORBIDDEN ||
method.getStatusCode() == SC_NO_CONTENT ||
method.getResponseBodyAsStream() == null) {
return null;
}
final InputStream responseBodyAsStream = method.getResponseBodyAsStream();
final byte[] locationData = new byte[1024];
return responseBodyAsStream.read(locationData) > 0 ? locationData : null;
}
@Override
protected void setup(GetMethod method)
throws SwiftInternalStateException {
setHeaders(method, requestHeaders);
}
});
} catch (IOException e) {
LOG.warn("Failed to get the location of " + path + ": " + e, e);
return null;
}
}
/**
* Create the URI needed to query the location of an object
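   * For example (all values illustrative), an object-location endpoint of
   * {@code http://host:8080/object_endpoint/AUTH_tenant} and a path of
   * {@code /container/dir/file} combine to
   * {@code http://host:8080/object_endpoint/AUTH_tenant/container/dir/file}.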
* @param path object path to retrieve information about
* @return the URI for the location operation
* @throws SwiftException if the URI could not be constructed
*/
private URI pathToObjectLocation(SwiftObjectPath path) throws SwiftException {
URI uri;
String dataLocationURI = objectLocationURI.toString();
try {
if (path.toString().startsWith("/")) {
dataLocationURI = dataLocationURI.concat(path.toUriPath());
} else {
dataLocationURI = dataLocationURI.concat("/").concat(path.toUriPath());
}
uri = new URI(dataLocationURI);
} catch (URISyntaxException e) {
throw new SwiftException(e);
}
return uri;
}
/**
* Find objects under a prefix
*
* @param path path prefix
* @param requestHeaders optional request headers
* @return byte[] file data or null if the object was not found
* @throws IOException on IO Faults
* @throws FileNotFoundException if nothing is at the end of the URI -that is,
* the directory is empty
*/
public byte[] findObjectsByPrefix(SwiftObjectPath path,
final Header... requestHeaders) throws IOException {
preRemoteCommand("findObjectsByPrefix");
URI uri;
String dataLocationURI = getEndpointURI().toString();
try {
String object = path.getObject();
if (object.startsWith("/")) {
object = object.substring(1);
}
object = encodeUrl(object);
dataLocationURI = dataLocationURI.concat("/")
.concat(path.getContainer())
.concat("/?prefix=")
.concat(object)
;
uri = new URI(dataLocationURI);
} catch (URISyntaxException e) {
throw new SwiftException("Bad URI: " + dataLocationURI, e);
}
return perform("findObjectsByPrefix", uri, new GetMethodProcessor<byte[]>() {
@Override
public byte[] extractResult(GetMethod method) throws IOException {
if (method.getStatusCode() == SC_NOT_FOUND) {
//no result
throw new FileNotFoundException("Not found " + method.getURI());
}
return method.getResponseBody();
}
@Override
protected int[] getAllowedStatusCodes() {
return new int[]{
SC_OK,
SC_NOT_FOUND
};
}
@Override
protected void setup(GetMethod method) throws
SwiftInternalStateException {
setHeaders(method, requestHeaders);
}
});
}
/**
* Find objects in a directory
*
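   * A worked example (endpoint and names illustrative): for container
   * {@code c} and object {@code /dir}, a shallow listing requests
   * {@code <endpoint>/c/?prefix=dir/&format=json&delimiter=/},
   * while a deep listing omits the {@code delimiter} parameter.
   *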
* @param path path prefix
   * @param listDeep ask for all objects under the path, not just one level
   * @param requestHeaders optional request headers
* @return byte[] file data or null if the object was not found
* @throws IOException on IO Faults
* @throws FileNotFoundException if nothing is at the end of the URI -that is,
* the directory is empty
*/
public byte[] listDeepObjectsInDirectory(SwiftObjectPath path,
boolean listDeep,
final Header... requestHeaders)
throws IOException {
preRemoteCommand("listDeepObjectsInDirectory");
String endpoint = getEndpointURI().toString();
StringBuilder dataLocationURI = new StringBuilder();
dataLocationURI.append(endpoint);
String object = path.getObject();
if (object.startsWith("/")) {
object = object.substring(1);
}
if (!object.endsWith("/")) {
object = object.concat("/");
}
if (object.equals("/")) {
object = "";
}
dataLocationURI = dataLocationURI.append("/")
.append(path.getContainer())
.append("/?prefix=")
.append(object)
.append("&format=json");
    //for a shallow listing, add a delimiter so that only a single
    //directory level is returned
    if (!listDeep) {
dataLocationURI.append("&delimiter=/");
}
return findObjects(dataLocationURI.toString(), requestHeaders);
}
/**
* Find objects in a location
* @param location URI
* @param requestHeaders optional request headers
   * @return the body of the response
* @throws IOException IO problems
*/
private byte[] findObjects(String location, final Header[] requestHeaders) throws
IOException {
URI uri;
preRemoteCommand("findObjects");
try {
uri = new URI(location);
} catch (URISyntaxException e) {
throw new SwiftException("Bad URI: " + location, e);
}
return perform("findObjects", uri, new GetMethodProcessor<byte[]>() {
@Override
public byte[] extractResult(GetMethod method) throws IOException {
if (method.getStatusCode() == SC_NOT_FOUND) {
//no result
throw new FileNotFoundException("Not found " + method.getURI());
}
return method.getResponseBody();
}
@Override
protected int[] getAllowedStatusCodes() {
return new int[]{
SC_OK,
SC_NOT_FOUND
};
}
@Override
protected void setup(GetMethod method)
throws SwiftInternalStateException {
setHeaders(method, requestHeaders);
}
});
}
/**
* Copy an object. This is done by sending a COPY method to the filesystem
* which is required to handle this WebDAV-level extension to the
* base HTTP operations.
*
* @param src source path
* @param dst destination path
* @param headers any headers
* @return true if the status code was considered successful
* @throws IOException on IO Faults
*/
public boolean copyObject(SwiftObjectPath src, final SwiftObjectPath dst,
final Header... headers) throws IOException {
preRemoteCommand("copyObject");
return perform("copy", pathToURI(src), new CopyMethodProcessor<Boolean>() {
@Override
public Boolean extractResult(CopyMethod method) throws IOException {
return method.getStatusCode() != SC_NOT_FOUND;
}
@Override
protected void setup(CopyMethod method) throws
SwiftInternalStateException {
setHeaders(method, headers);
method.addRequestHeader(HEADER_DESTINATION, dst.toUriPath());
}
});
}
/**
* Uploads file as Input Stream to Swift.
* The data stream will be closed after the request.
*
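   * A usage sketch; the container and object names are illustrative:
   * <pre>{@code
   * byte[] data = "hello".getBytes("UTF-8");
   * client.upload(new SwiftObjectPath("container", "/hello.txt"),
   *     new ByteArrayInputStream(data), data.length);
   * }</pre>
   *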
* @param path path to Swift
* @param data object data
* @param length length of data
* @param requestHeaders http headers
* @throws IOException on IO Faults
*/
public void upload(SwiftObjectPath path,
final InputStream data,
final long length,
final Header... requestHeaders)
throws IOException {
preRemoteCommand("upload");
try {
perform("upload", pathToURI(path), new PutMethodProcessor<byte[]>() {
@Override
public byte[] extractResult(PutMethod method) throws IOException {
return method.getResponseBody();
}
@Override
protected void setup(PutMethod method) throws
SwiftInternalStateException {
method.setRequestEntity(new InputStreamRequestEntity(data, length));
setHeaders(method, requestHeaders);
}
});
} finally {
data.close();
}
}
/**
* Deletes object from swift.
* The result is true if this operation did the deletion.
*
* @param path path to file
* @param requestHeaders http headers
* @throws IOException on IO Faults
*/
public boolean delete(SwiftObjectPath path, final Header... requestHeaders) throws IOException {
preRemoteCommand("delete");
return perform("", pathToURI(path), new DeleteMethodProcessor<Boolean>() {
@Override
public Boolean extractResult(DeleteMethod method) throws IOException {
return method.getStatusCode() == SC_NO_CONTENT;
}
@Override
protected void setup(DeleteMethod method) throws
SwiftInternalStateException {
setHeaders(method, requestHeaders);
}
});
}
/**
* Issue a head request
* @param reason reason -used in logs
* @param path path to query
* @param requestHeaders request header
* @return the response headers. This may be an empty list
* @throws IOException IO problems
* @throws FileNotFoundException if there is nothing at the end
*/
public Header[] headRequest(String reason,
SwiftObjectPath path,
final Header... requestHeaders)
throws IOException {
preRemoteCommand("headRequest: "+ reason);
return perform(reason, pathToURI(path), new HeadMethodProcessor<Header[]>() {
@Override
public Header[] extractResult(HeadMethod method) throws IOException {
if (method.getStatusCode() == SC_NOT_FOUND) {
throw new FileNotFoundException("Not Found " + method.getURI());
}
return method.getResponseHeaders();
}
@Override
protected void setup(HeadMethod method) throws
SwiftInternalStateException {
setHeaders(method, requestHeaders);
}
});
}
/**
* Issue a put request
* @param path path
* @param requestHeaders optional headers
* @return the HTTP response
* @throws IOException any problem
*/
public int putRequest(SwiftObjectPath path, final Header... requestHeaders)
throws IOException {
preRemoteCommand("putRequest");
return perform(pathToURI(path), new PutMethodProcessor<Integer>() {
@Override
public Integer extractResult(PutMethod method) throws IOException {
return method.getStatusCode();
}
@Override
protected void setup(PutMethod method) throws
SwiftInternalStateException {
setHeaders(method, requestHeaders);
}
});
}
/**
* Authenticate to Openstack Keystone
* As well as returning the access token, the member fields {@link #token},
* {@link #endpointURI} and {@link #objectLocationURI} are set up for re-use.
* <p>
* This method is re-entrant -if more than one thread attempts to authenticate
   * neither will block -but the field values will be those of the last caller.
*
* @return authenticated access token
*/
public AccessToken authenticate() throws IOException {
final AuthenticationRequest authenticationRequest;
if (useKeystoneAuthentication) {
authenticationRequest = keystoneAuthRequest;
} else {
authenticationRequest = authRequest;
}
LOG.debug("started authentication");
return perform("authentication",
authUri,
new AuthenticationPost(authenticationRequest));
}
private class AuthenticationPost extends AuthMethodProcessor<AccessToken> {
final AuthenticationRequest authenticationRequest;
private AuthenticationPost(AuthenticationRequest authenticationRequest) {
this.authenticationRequest = authenticationRequest;
}
@Override
protected void setup(AuthPostMethod method) throws IOException {
      method.setRequestEntity(getAuthenticationRequest(authenticationRequest));
}
/**
     * The specification says any of the 2xx codes are OK, so list all
     * the standard ones; 400 and 401 are also accepted so that
     * authentication failures can trigger the fallback mechanism.
     * @return a set of 2XX status codes, plus 400 and 401.
*/
@Override
protected int[] getAllowedStatusCodes() {
return new int[]{
SC_OK,
SC_BAD_REQUEST,
SC_CREATED,
SC_ACCEPTED,
SC_NON_AUTHORITATIVE_INFORMATION,
SC_NO_CONTENT,
SC_RESET_CONTENT,
SC_PARTIAL_CONTENT,
SC_MULTI_STATUS,
SC_UNAUTHORIZED //if request unauthorized, try another method
};
}
@Override
public AccessToken extractResult(AuthPostMethod method) throws IOException {
//initial check for failure codes leading to authentication failures
if (method.getStatusCode() == SC_BAD_REQUEST) {
throw new SwiftAuthenticationFailedException(
authenticationRequest.toString(), "POST", authUri, method);
}
final AuthenticationResponse access =
JSONUtil.toObject(method.getResponseBodyAsString(),
AuthenticationWrapper.class).getAccess();
final List<Catalog> serviceCatalog = access.getServiceCatalog();
//locate the specific service catalog that defines Swift; variations
//in the name of this add complexity to the search
boolean catalogMatch = false;
StringBuilder catList = new StringBuilder();
StringBuilder regionList = new StringBuilder();
//these fields are all set together at the end of the operation
URI endpointURI = null;
URI objectLocation;
Endpoint swiftEndpoint = null;
AccessToken accessToken;
for (Catalog catalog : serviceCatalog) {
String name = catalog.getName();
String type = catalog.getType();
String descr = String.format("[%s: %s]; ", name, type);
catList.append(descr);
if (LOG.isDebugEnabled()) {
LOG.debug("Catalog entry " + descr);
}
if (name.equals(SERVICE_CATALOG_SWIFT)
|| name.equals(SERVICE_CATALOG_CLOUD_FILES)
|| type.equals(SERVICE_CATALOG_OBJECT_STORE)) {
//swift is found
if (LOG.isDebugEnabled()) {
LOG.debug("Found swift catalog as " + name + " => " + type);
}
//now go through the endpoints
for (Endpoint endpoint : catalog.getEndpoints()) {
String endpointRegion = endpoint.getRegion();
URI publicURL = endpoint.getPublicURL();
URI internalURL = endpoint.getInternalURL();
descr = String.format("[%s => %s / %s]; ",
endpointRegion,
publicURL,
internalURL);
regionList.append(descr);
if (LOG.isDebugEnabled()) {
LOG.debug("Endpoint " + descr);
}
if (region == null || endpointRegion.equals(region)) {
endpointURI = usePublicURL ? publicURL : internalURL;
swiftEndpoint = endpoint;
break;
}
}
}
}
if (endpointURI == null) {
String message = "Could not find swift service from auth URL "
+ authUri
+ " and region '" + region + "'. "
+ "Categories: " + catList
+ ((regionList.length() > 0) ?
("regions: " + regionList)
: "No regions");
throw new SwiftInvalidResponseException(message,
SC_OK,
"authenticating",
authUri);
}
accessToken = access.getToken();
String path = SWIFT_OBJECT_AUTH_ENDPOINT
+ swiftEndpoint.getTenantId();
String host = endpointURI.getHost();
try {
objectLocation = new URI(endpointURI.getScheme(),
null,
host,
endpointURI.getPort(),
path,
null,
null);
} catch (URISyntaxException e) {
throw new SwiftException("object endpoint URI is incorrect: "
+ endpointURI
+ " + " + path,
e);
}
setAuthDetails(endpointURI, objectLocation, accessToken);
if (LOG.isDebugEnabled()) {
LOG.debug("authenticated against " + endpointURI);
}
createDefaultContainer();
return accessToken;
}
}
  private StringRequestEntity getAuthenticationRequest(AuthenticationRequest authenticationRequest)
throws IOException {
final String data = JSONUtil.toJSON(new AuthenticationRequestWrapper(
authenticationRequest));
if (LOG.isDebugEnabled()) {
LOG.debug("Authenticating with " + authenticationRequest);
}
return toJsonEntity(data);
}
/**
   * Create the default container if it doesn't exist, for Hadoop Swift
   * integration. Non-reentrant, as this should only be needed once.
*
* @throws IOException IO problems.
*/
private synchronized void createDefaultContainer() throws IOException {
createContainer(container);
}
/**
* Create a container -if it already exists, do nothing
*
* @param containerName the container name
* @throws IOException IO problems
* @throws SwiftBadRequestException invalid container name
* @throws SwiftInvalidResponseException error from the server
*/
public void createContainer(String containerName) throws IOException {
SwiftObjectPath objectPath = new SwiftObjectPath(containerName, "");
try {
//see if the data is there
headRequest("createContainer", objectPath, NEWEST);
} catch (FileNotFoundException ex) {
int status = 0;
try {
status = putRequest(objectPath);
} catch (FileNotFoundException e) {
//triggered by a very bad container name.
//re-insert the 404 result into the status
status = SC_NOT_FOUND;
}
if (status == SC_BAD_REQUEST) {
throw new SwiftBadRequestException(
"Bad request -authentication failure or bad container name?",
status,
"PUT",
null);
}
if (!isStatusCodeExpected(status,
SC_OK,
SC_CREATED,
SC_ACCEPTED,
SC_NO_CONTENT)) {
throw new SwiftInvalidResponseException("Couldn't create container "
+ containerName +
" for storing data in Swift." +
" Try to create container " +
containerName + " manually ",
status,
"PUT",
null);
} else {
throw ex;
}
}
}
/**
* Trigger an initial auth operation if some of the needed
* fields are missing
*
* @throws IOException on problems
*/
private void authIfNeeded() throws IOException {
if (getEndpointURI() == null) {
authenticate();
}
}
/**
* Pre-execution actions to be performed by methods. Currently this
* <ul>
* <li>Logs the operation at TRACE</li>
* <li>Authenticates the client -if needed</li>
* </ul>
* @throws IOException
*/
private void preRemoteCommand(String operation) throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("Executing " + operation);
}
authIfNeeded();
}
/**
* Performs the HTTP request, validates the response code and returns
* the received data. HTTP Status codes are converted into exceptions.
*
* @param uri URI to source
* @param processor HttpMethodProcessor
* @param <M> method
* @param <R> result type
* @return result of HTTP request
* @throws IOException IO problems
* @throws SwiftBadRequestException the status code indicated "Bad request"
* @throws SwiftInvalidResponseException the status code is out of range
* for the action (excluding 404 responses)
* @throws SwiftInternalStateException the internal state of this client
* is invalid
* @throws FileNotFoundException a 404 response was returned
*/
private <M extends HttpMethod, R> R perform(URI uri,
HttpMethodProcessor<M, R> processor)
throws IOException,
SwiftBadRequestException,
SwiftInternalStateException,
SwiftInvalidResponseException,
FileNotFoundException {
return perform("",uri, processor);
}
/**
* Performs the HTTP request, validates the response code and returns
* the received data. HTTP Status codes are converted into exceptions.
* @param reason why is this operation taking place. Used for statistics
* @param uri URI to source
* @param processor HttpMethodProcessor
* @param <M> method
* @param <R> result type
* @return result of HTTP request
* @throws IOException IO problems
* @throws SwiftBadRequestException the status code indicated "Bad request"
* @throws SwiftInvalidResponseException the status code is out of range
* for the action (excluding 404 responses)
* @throws SwiftInternalStateException the internal state of this client
* is invalid
* @throws FileNotFoundException a 404 response was returned
*/
private <M extends HttpMethod, R> R perform(String reason,
URI uri,
HttpMethodProcessor<M, R> processor)
throws IOException, SwiftBadRequestException, SwiftInternalStateException,
SwiftInvalidResponseException, FileNotFoundException {
checkNotNull(uri);
checkNotNull(processor);
final M method = processor.createMethod(uri.toString());
//retry policy
HttpMethodParams methodParams = method.getParams();
methodParams.setParameter(HttpMethodParams.RETRY_HANDLER,
new DefaultHttpMethodRetryHandler(
retryCount, false));
methodParams.setIntParameter(HttpConnectionParams.CONNECTION_TIMEOUT,
connectTimeout);
methodParams.setSoTimeout(socketTimeout);
method.addRequestHeader(HEADER_USER_AGENT, SWIFT_USER_AGENT);
Duration duration = new Duration();
boolean success = false;
try {
int statusCode = 0;
try {
statusCode = exec(method);
} catch (IOException e) {
//rethrow with extra diagnostics and wiki links
throw ExceptionDiags.wrapException(uri.toString(), method.getName(), e);
}
//look at the response and see if it was valid or not.
//Valid is more than a simple 200; even 404 "not found" is considered
//valid -which it is for many methods.
//validate the allowed status code for this operation
int[] allowedStatusCodes = processor.getAllowedStatusCodes();
boolean validResponse = isStatusCodeExpected(statusCode,
allowedStatusCodes);
if (!validResponse) {
IOException ioe = buildException(uri, method, statusCode);
throw ioe;
}
R r = processor.extractResult(method);
success = true;
return r;
} catch (IOException e) {
//release the connection -always
method.releaseConnection();
throw e;
} finally {
duration.finished();
durationStats.add(method.getName()+" " + reason, duration, success);
}
}
/**
* Build an exception from a failed operation. This can include generating
* specific exceptions (e.g. FileNotFound), as well as the default
* {@link SwiftInvalidResponseException}.
*
* @param uri URI for operation
* @param method operation that failed
* @param statusCode status code
* @param <M> method type
* @return an exception to throw
*/
private <M extends HttpMethod> IOException buildException(URI uri,
M method,
int statusCode) {
IOException fault;
//log the failure @debug level
String errorMessage = String.format("Method %s on %s failed, status code: %d," +
" status line: %s",
method.getName(),
uri,
statusCode,
method.getStatusLine()
);
if (LOG.isDebugEnabled()) {
LOG.debug(errorMessage);
}
//send the command
switch (statusCode) {
case SC_NOT_FOUND:
fault = new FileNotFoundException("Operation " + method.getName()
+ " on " + uri);
break;
case SC_BAD_REQUEST:
//bad HTTP request
fault = new SwiftBadRequestException(
"Bad request against " + uri,
method.getName(),
uri,
method);
break;
case SC_REQUESTED_RANGE_NOT_SATISFIABLE:
//out of range
StringBuilder errorText = new StringBuilder(method.getStatusText());
//get the requested length
Header requestContentLen = method.getRequestHeader(HEADER_CONTENT_LENGTH);
if (requestContentLen!=null) {
errorText.append(" requested ").append(requestContentLen.getValue());
}
//and the result
Header availableContentRange = method.getResponseHeader(
HEADER_CONTENT_RANGE);
      if (availableContentRange != null) {
errorText.append(" available ").append(availableContentRange.getValue());
}
fault = new EOFException(errorText.toString());
break;
case SC_UNAUTHORIZED:
//auth failure; should only happen on the second attempt
fault = new SwiftAuthenticationFailedException(
"Operation not authorized- current access token ="
+ getToken(),
method.getName(),
uri,
method);
break;
case SwiftProtocolConstants.SC_TOO_MANY_REQUESTS_429:
case SwiftProtocolConstants.SC_THROTTLED_498:
//response code that may mean the client is being throttled
fault = new SwiftThrottledRequestException(
"Client is being throttled: too many requests",
method.getName(),
uri,
method);
break;
default:
//return a generic invalid HTTP response
fault = new SwiftInvalidResponseException(
errorMessage,
method.getName(),
uri,
method);
}
return fault;
}
/**
* Exec a GET request and return the input stream of the response
*
* @param uri URI to GET
* @param requestHeaders request headers
* @return the input stream. This must be closed to avoid log errors
* @throws IOException
*/
private HttpBodyContent doGet(final URI uri, final Header... requestHeaders) throws IOException {
return perform("", uri, new GetMethodProcessor<HttpBodyContent>() {
@Override
public HttpBodyContent extractResult(GetMethod method) throws IOException {
return
new HttpBodyContent(
new HttpInputStreamWithRelease(uri, method), method.getResponseContentLength()
);
}
@Override
protected void setup(GetMethod method) throws
SwiftInternalStateException {
setHeaders(method, requestHeaders);
}
});
}
/**
* Create an instance against a specific FS URI,
*
* @param filesystemURI filesystem to bond to
* @param config source of configuration data
* @return REST client instance
* @throws IOException on instantiation problems
*/
public static SwiftRestClient getInstance(URI filesystemURI,
Configuration config) throws IOException {
return new SwiftRestClient(filesystemURI, config);
}
/**
* Convert the (JSON) data to a string request as UTF-8
*
* @param data data
* @return the data
* @throws SwiftException if for some very unexpected reason it's impossible
* to convert the data to UTF-8.
*/
private static StringRequestEntity toJsonEntity(String data) throws
SwiftException {
StringRequestEntity entity;
try {
entity = new StringRequestEntity(data, "application/json", "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new SwiftException("Could not encode data as UTF-8", e);
}
return entity;
}
/**
* Converts Swift path to URI to make request.
* This is public for unit testing
*
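   * For example (values illustrative), an endpoint of
   * {@code http://host:8080/v1/AUTH_tenant} plus an object path of
   * {@code /container/file} yields
   * {@code http://host:8080/v1/AUTH_tenant/container/file}.
   *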
* @param path path to object
   * @param endpointURI endpoint URL, e.g. {@code http://domain.com}
* @return valid URI for object
* @throws SwiftException
*/
public static URI pathToURI(SwiftObjectPath path,
URI endpointURI) throws SwiftException {
checkNotNull(endpointURI, "Null Endpoint -client is not authenticated");
String dataLocationURI = endpointURI.toString();
try {
dataLocationURI = SwiftUtils.joinPaths(dataLocationURI, encodeUrl(path.toUriPath()));
return new URI(dataLocationURI);
} catch (URISyntaxException e) {
throw new SwiftException("Failed to create URI from " + dataLocationURI, e);
}
}
/**
* Encode the URL. This extends {@link URLEncoder#encode(String, String)}
* with a replacement of + with %20.
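   * For example, {@code encodeUrl("dir/a b")} returns
   * {@code "dir%2Fa%20b"}, while strings without whitespace are
   * returned unchanged.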
* @param url URL string
* @return an encoded string
* @throws SwiftException if the URL cannot be encoded
*/
private static String encodeUrl(String url) throws SwiftException {
if (url.matches(".*\\s+.*")) {
try {
url = URLEncoder.encode(url, "UTF-8");
url = url.replace("+", "%20");
} catch (UnsupportedEncodingException e) {
throw new SwiftException("failed to encode URI", e);
}
}
return url;
}
/**
* Convert a swift path to a URI relative to the current endpoint.
*
* @param path path
   * @return a path off the current endpoint URI.
* @throws SwiftException
*/
private URI pathToURI(SwiftObjectPath path) throws SwiftException {
return pathToURI(path, getEndpointURI());
}
/**
   * Add the headers to the method, and the auth token (which must be set).
* @param method method to update
* @param requestHeaders the list of headers
* @throws SwiftInternalStateException not yet authenticated
*/
private void setHeaders(HttpMethodBase method, Header[] requestHeaders)
throws SwiftInternalStateException {
for (Header header : requestHeaders) {
method.addRequestHeader(header);
}
setAuthToken(method, getToken());
}
/**
* Set the auth key header of the method to the token ID supplied
*
* @param method method
* @param accessToken access token
* @throws SwiftInternalStateException if the client is not yet authenticated
*/
private void setAuthToken(HttpMethodBase method, AccessToken accessToken)
throws SwiftInternalStateException {
checkNotNull(accessToken,"Not authenticated");
method.addRequestHeader(HEADER_AUTH_KEY, accessToken.getId());
}
/**
* Execute a method in a new HttpClient instance.
* If the auth failed, authenticate then retry the method.
*
   * @param method method to execute
* @param <M> Method type
* @return the status code
* @throws IOException on any failure
*/
private <M extends HttpMethod> int exec(M method) throws IOException {
final HttpClient client = new HttpClient();
if (proxyHost != null) {
client.getParams().setParameter(ConnRoutePNames.DEFAULT_PROXY,
new HttpHost(proxyHost, proxyPort));
}
int statusCode = execWithDebugOutput(method, client);
if ((statusCode == HttpStatus.SC_UNAUTHORIZED
|| statusCode == HttpStatus.SC_BAD_REQUEST)
&& method instanceof AuthPostMethod
&& !useKeystoneAuthentication) {
if (LOG.isDebugEnabled()) {
LOG.debug("Operation failed with status " + method.getStatusCode() +
" attempting keystone auth");
}
//if rackspace key authentication failed - try custom Keystone authentication
useKeystoneAuthentication = true;
final AuthPostMethod authentication = (AuthPostMethod) method;
//replace rackspace auth with keystone one
      authentication.setRequestEntity(getAuthenticationRequest(keystoneAuthRequest));
statusCode = execWithDebugOutput(method, client);
}
    if (statusCode == HttpStatus.SC_UNAUTHORIZED) {
//unauthed -or the auth uri rejected it.
if (method instanceof AuthPostMethod) {
//unauth response from the AUTH URI itself.
throw new SwiftAuthenticationFailedException(authRequest.toString(),
"auth",
authUri,
method);
}
//any other URL: try again
if (LOG.isDebugEnabled()) {
LOG.debug("Reauthenticating");
}
//re-auth, this may recurse into the same dir
authenticate();
if (LOG.isDebugEnabled()) {
LOG.debug("Retrying original request");
}
statusCode = execWithDebugOutput(method, client);
}
return statusCode;
}
/**
* Execute the request with the request and response logged at debug level
* @param method method to execute
* @param client client to use
* @param <M> method type
* @return the status code
* @throws IOException any failure reported by the HTTP client.
*/
private <M extends HttpMethod> int execWithDebugOutput(M method,
HttpClient client) throws
IOException {
if (LOG.isDebugEnabled()) {
StringBuilder builder = new StringBuilder(
method.getName() + " " + method.getURI() + "\n");
for (Header header : method.getRequestHeaders()) {
builder.append(header.toString());
}
LOG.debug(builder);
}
int statusCode = client.executeMethod(method);
if (LOG.isDebugEnabled()) {
LOG.debug("Status code = " + statusCode);
}
return statusCode;
}
/**
* Ensures that an object reference passed as a parameter to the calling
* method is not null.
*
* @param reference an object reference
* @return the non-null reference that was validated
* @throws NullPointerException if {@code reference} is null
*/
private static <T> T checkNotNull(T reference) throws
SwiftInternalStateException {
return checkNotNull(reference, "Null Reference");
}
private static <T> T checkNotNull(T reference, String message) throws
SwiftInternalStateException {
if (reference == null) {
throw new SwiftInternalStateException(message);
}
return reference;
}
/**
* Check for a status code being expected -takes a list of expected values
*
* @param status received status
* @param expected expected value
* @return true iff status is an element of [expected]
*/
private boolean isStatusCodeExpected(int status, int... expected) {
for (int code : expected) {
if (status == code) {
return true;
}
}
return false;
}
@Override
public String toString() {
return "Swift client: " + serviceDescription;
}
/**
* Get the region which this client is bound to
* @return the region
*/
public String getRegion() {
return region;
}
/**
* Get the tenant to which this client is bound
* @return the tenant
*/
public String getTenant() {
return tenant;
}
/**
* Get the username this client identifies itself as
* @return the username
*/
public String getUsername() {
return username;
}
/**
* Get the container to which this client is bound
* @return the container
*/
public String getContainer() {
return container;
}
/**
* Is this client bound to a location aware Swift blobstore
* -that is, can you query for the location of partitions
* @return true iff the location of multipart file uploads
* can be determined.
*/
public boolean isLocationAware() {
return locationAware;
}
/**
* Get the blocksize of this filesystem
* @return a blocksize > 0
*/
public long getBlocksizeKB() {
return blocksizeKB;
}
/**
* Get the partition size in KB
* @return the partition size
*/
public int getPartSizeKB() {
return partSizeKB;
}
/**
* Get the buffer size in KB
* @return the buffer size wanted for reads
*/
public int getBufferSizeKB() {
return bufferSizeKB;
}
public int getProxyPort() {
return proxyPort;
}
public String getProxyHost() {
return proxyHost;
}
public int getRetryCount() {
return retryCount;
}
public int getConnectTimeout() {
return connectTimeout;
}
public boolean isUsePublicURL() {
return usePublicURL;
}
public int getThrottleDelay() {
return throttleDelay;
}
/**
* Get the current operation statistics
* @return a snapshot of the statistics
*/
public List<DurationStats> getOperationStatistics() {
return durationStats.getDurationStatistics();
}
}
| 60,645 | 31.292865 | 103 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/SwiftProtocolConstants.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.http;
import org.apache.hadoop.util.VersionInfo;
/**
* Constants used in the Swift REST protocol,
* and in the properties used to configure the {@link SwiftRestClient}.
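 * <p>
 * A sketch of programmatic configuration using these keys; the service
 * name {@code example} and all values are illustrative, and the
 * per-service key assembly is performed elsewhere (see RestClientBindings):
 * <pre>{@code
 * Configuration conf = new Configuration();
 * String prefix = SWIFT_SERVICE_PREFIX + "example"; //fs.swift.service.example
 * conf.set(prefix + DOT_AUTH_URL, "http://auth.example.org:5000/v2.0/tokens");
 * conf.set(prefix + DOT_USERNAME, "user");
 * conf.set(prefix + DOT_PASSWORD, "secret");
 * conf.setInt(SWIFT_RETRY_COUNT, 3);
 * }</pre>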
*/
public class SwiftProtocolConstants {
/**
* Swift-specific header for authentication: {@value}
*/
public static final String HEADER_AUTH_KEY = "X-Auth-Token";
/**
* Default port used by Swift for HTTP
*/
public static final int SWIFT_HTTP_PORT = 8080;
/**
* Default port used by Swift Auth for HTTPS
*/
public static final int SWIFT_HTTPS_PORT = 443;
/** HTTP standard {@value} header */
public static final String HEADER_RANGE = "Range";
/** HTTP standard {@value} header */
public static final String HEADER_DESTINATION = "Destination";
/** HTTP standard {@value} header */
public static final String HEADER_LAST_MODIFIED = "Last-Modified";
/** HTTP standard {@value} header */
public static final String HEADER_CONTENT_LENGTH = "Content-Length";
/** HTTP standard {@value} header */
public static final String HEADER_CONTENT_RANGE = "Content-Range";
/**
   * Pattern for range headers: {@value}
*/
public static final String SWIFT_RANGE_HEADER_FORMAT_PATTERN = "bytes=%d-%d";
/**
* section in the JSON catalog provided after auth listing the swift FS:
* {@value}
*/
public static final String SERVICE_CATALOG_SWIFT = "swift";
/**
* section in the JSON catalog provided after auth listing the cloudfiles;
* this is an alternate catalog entry name
* {@value}
*/
public static final String SERVICE_CATALOG_CLOUD_FILES = "cloudFiles";
/**
* section in the JSON catalog provided after auth listing the object store;
* this is an alternate catalog entry name
* {@value}
*/
public static final String SERVICE_CATALOG_OBJECT_STORE = "object-store";
/**
* entry in the swift catalog defining the prefix used to talk to objects
* {@value}
*/
public static final String SWIFT_OBJECT_AUTH_ENDPOINT =
"/object_endpoint/";
/**
* Swift-specific header: object manifest used in the final upload
* of a multipart operation: {@value}
*/
public static final String X_OBJECT_MANIFEST = "X-Object-Manifest";
/**
   * Swift-specific header: number of objects in a container: {@value}
*/
public static final String X_CONTAINER_OBJECT_COUNT =
"X-Container-Object-Count";
/**
   * Swift-specific header: number of bytes used in a container: {@value}
*/
public static final String X_CONTAINER_BYTES_USED = "X-Container-Bytes-Used";
/**
   * Header to set when requesting the latest version of a file: {@value}
*/
public static final String X_NEWEST = "X-Newest";
/**
   * Non-standard status code returned by some endpoints when a request is
   * throttled.
*/
public static final int SC_THROTTLED_498 = 498;
/**
   * Status code for throttled operations, as standardized in RFC 6585
   * ("Too Many Requests")
*/
public static final int SC_TOO_MANY_REQUESTS_429 = 429;
public static final String FS_SWIFT = "fs.swift";
/**
* Prefix for all instance-specific values in the configuration: {@value}
*/
public static final String SWIFT_SERVICE_PREFIX = FS_SWIFT + ".service.";
/**
* timeout for all connections: {@value}
*/
public static final String SWIFT_CONNECTION_TIMEOUT =
FS_SWIFT + ".connect.timeout";
/**
   * socket timeout for all connections: {@value}
*/
public static final String SWIFT_SOCKET_TIMEOUT =
FS_SWIFT + ".socket.timeout";
/**
* the default socket timeout in millis {@value}.
* This controls how long the connection waits for responses from
* servers.
*/
public static final int DEFAULT_SOCKET_TIMEOUT = 60000;
/**
* connection retry count for all connections: {@value}
*/
public static final String SWIFT_RETRY_COUNT =
FS_SWIFT + ".connect.retry.count";
/**
   * delay in millis between bulk (delete, rename, copy) operations: {@value}
*/
public static final String SWIFT_THROTTLE_DELAY =
FS_SWIFT + ".connect.throttle.delay";
/**
* the default throttle delay in millis {@value}
*/
public static final int DEFAULT_THROTTLE_DELAY = 0;
/**
* blocksize for all filesystems: {@value}
*/
public static final String SWIFT_BLOCKSIZE =
FS_SWIFT + ".blocksize";
/**
* the default blocksize for filesystems in KB: {@value}
*/
public static final int DEFAULT_SWIFT_BLOCKSIZE = 32 * 1024;
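  // 32 * 1024 KB, i.e. a 32 MB blocksize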
/**
* partition size for all filesystems in KB: {@value}
*/
public static final String SWIFT_PARTITION_SIZE =
FS_SWIFT + ".partsize";
/**
* The default partition size for uploads: {@value}
*/
  public static final int DEFAULT_SWIFT_PARTITION_SIZE = 4608 * 1024;
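  // 4608 * 1024 KB = 4.5 GB; presumably chosen to stay under Swift's 5 GB
  // single-object limit, though that rationale is an inference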
/**
* request size for reads in KB: {@value}
*/
public static final String SWIFT_REQUEST_SIZE =
FS_SWIFT + ".requestsize";
/**
   * The default request size for reads, in KB: {@value}
*/
public static final int DEFAULT_SWIFT_REQUEST_SIZE = 64;
  public static final String HEADER_USER_AGENT = "User-Agent";
/**
* The user agent sent in requests.
*/
  public static final String SWIFT_USER_AGENT = "Apache Hadoop Swift Client "
+ VersionInfo.getBuildVersion();
/**
   * Key for passing the service name as a property; not read from the
   * configuration: {@value}
*/
public static final String DOT_SERVICE = ".SERVICE-NAME";
/**
   * Key for passing the container name as a property; not read from the
   * configuration: {@value}
*/
public static final String DOT_CONTAINER = ".CONTAINER-NAME";
public static final String DOT_AUTH_URL = ".auth.url";
public static final String DOT_TENANT = ".tenant";
public static final String DOT_USERNAME = ".username";
public static final String DOT_PASSWORD = ".password";
public static final String DOT_HTTP_PORT = ".http.port";
public static final String DOT_HTTPS_PORT = ".https.port";
public static final String DOT_REGION = ".region";
public static final String DOT_PROXY_HOST = ".proxy.host";
public static final String DOT_PROXY_PORT = ".proxy.port";
public static final String DOT_LOCATION_AWARE = ".location-aware";
public static final String DOT_APIKEY = ".apikey";
public static final String DOT_USE_APIKEY = ".useApikey";
/**
   * flag to indicate that the public URL should be used: {@value}
*/
public static final String DOT_PUBLIC = ".public";
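  // Configuration sketch, assuming a service instance named "myswift" (the
  // instance name is hypothetical): per-service keys are built from
  // SWIFT_SERVICE_PREFIX + instance + suffix, giving e.g.
  //   fs.swift.service.myswift.auth.url
  //   fs.swift.service.myswift.username
  //   fs.swift.service.myswift.password
  //   fs.swift.service.myswift.public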
public static final String SWIFT_SERVICE_PROPERTY = FS_SWIFT + DOT_SERVICE;
public static final String SWIFT_CONTAINER_PROPERTY = FS_SWIFT + DOT_CONTAINER;
public static final String SWIFT_AUTH_PROPERTY = FS_SWIFT + DOT_AUTH_URL;
public static final String SWIFT_TENANT_PROPERTY = FS_SWIFT + DOT_TENANT;
public static final String SWIFT_USERNAME_PROPERTY = FS_SWIFT + DOT_USERNAME;
public static final String SWIFT_PASSWORD_PROPERTY = FS_SWIFT + DOT_PASSWORD;
public static final String SWIFT_APIKEY_PROPERTY = FS_SWIFT + DOT_APIKEY;
public static final String SWIFT_HTTP_PORT_PROPERTY = FS_SWIFT + DOT_HTTP_PORT;
public static final String SWIFT_HTTPS_PORT_PROPERTY = FS_SWIFT
+ DOT_HTTPS_PORT;
public static final String SWIFT_REGION_PROPERTY = FS_SWIFT + DOT_REGION;
public static final String SWIFT_PUBLIC_PROPERTY = FS_SWIFT + DOT_PUBLIC;
public static final String SWIFT_USE_API_KEY_PROPERTY = FS_SWIFT + DOT_USE_APIKEY;
public static final String SWIFT_LOCATION_AWARE_PROPERTY = FS_SWIFT +
DOT_LOCATION_AWARE;
public static final String SWIFT_PROXY_HOST_PROPERTY = FS_SWIFT + DOT_PROXY_HOST;
public static final String SWIFT_PROXY_PORT_PROPERTY = FS_SWIFT + DOT_PROXY_PORT;
public static final String HTTP_ROUTE_DEFAULT_PROXY =
"http.route.default-proxy";
/**
* Topology to return when a block location is requested
*/
public static final String TOPOLOGY_PATH = "/swift/unknown";
/**
* Block location to return when a block location is requested
*/
public static final String BLOCK_LOCATION = "/default-rack/swift";
/**
* Default number of attempts to retry a connect request: {@value}
*/
static final int DEFAULT_RETRY_COUNT = 3;
/**
* Default timeout in milliseconds for connection requests: {@value}
*/
static final int DEFAULT_CONNECT_TIMEOUT = 15000;
}
| 9,089 | 32.542435 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/CopyMethod.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.http;
import org.apache.commons.httpclient.methods.EntityEnclosingMethod;
/**
* Implementation for SwiftRestClient to make copy requests.
* COPY is a method that came with WebDAV (RFC2518), and is not something that
* can be handled by all proxies en-route to a filesystem.
*/
class CopyMethod extends EntityEnclosingMethod {
public CopyMethod(String uri) {
super(uri);
}
/**
* @return http method name
*/
@Override
public String getName() {
return "COPY";
}
}
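/*
 * Usage sketch (illustrative; the wiring below is assumed rather than taken
 * from this file): a server-side copy names the source object in the URI and
 * the target in the Destination header before the method is executed:
 *
 *   CopyMethod copy = new CopyMethod("http://host:8080/v1/AUTH_t/c/src");
 *   copy.addRequestHeader("Destination", "c/dest");
 */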
| 1,338 | 30.880952 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/ExceptionDiags.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.http;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.net.ConnectException;
import java.net.NoRouteToHostException;
import java.net.SocketTimeoutException;
import java.net.UnknownHostException;
/**
 * Variant of the Hadoop NetUtils exception wrapping, with URI awareness,
 * also available in branch-1.
*/
public class ExceptionDiags {
private static final Log LOG = LogFactory.getLog(ExceptionDiags.class);
/** text to point users elsewhere: {@value} */
private static final String FOR_MORE_DETAILS_SEE
= " For more details see: ";
/** text included in wrapped exceptions if the host is null: {@value} */
public static final String UNKNOWN_HOST = "(unknown)";
/** Base URL of the Hadoop Wiki: {@value} */
public static final String HADOOP_WIKI = "http://wiki.apache.org/hadoop/";
/**
* Take an IOException and a URI, wrap it where possible with
* something that includes the URI
*
* @param dest target URI
* @param operation operation
* @param exception the caught exception.
* @return an exception to throw
*/
public static IOException wrapException(final String dest,
final String operation,
final IOException exception) {
String action = operation + " " + dest;
String xref = null;
if (exception instanceof ConnectException) {
xref = "ConnectionRefused";
} else if (exception instanceof UnknownHostException) {
xref = "UnknownHost";
} else if (exception instanceof SocketTimeoutException) {
xref = "SocketTimeout";
} else if (exception instanceof NoRouteToHostException) {
xref = "NoRouteToHost";
}
String msg = action
+ " failed on exception: "
+ exception;
if (xref != null) {
msg = msg + ";" + see(xref);
}
return wrapWithMessage(exception, msg);
}
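  /*
   * Usage sketch (the URI and operation are hypothetical, not from this
   * file): callers catch an IOException from an HTTP operation and rethrow
   * the wrapped, URI-bearing variant:
   *
   *   try {
   *     // ... perform the GET ...
   *   } catch (IOException e) {
   *     throw ExceptionDiags.wrapException(
   *         "https://auth.example.org/v2.0", "GET", e);
   *   }
   */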
private static String see(final String entry) {
return FOR_MORE_DETAILS_SEE + HADOOP_WIKI + entry;
}
@SuppressWarnings("unchecked")
private static <T extends IOException> T wrapWithMessage(
T exception, String msg) {
Class<? extends Throwable> clazz = exception.getClass();
try {
Constructor<? extends Throwable> ctor =
clazz.getConstructor(String.class);
Throwable t = ctor.newInstance(msg);
return (T) (t.initCause(exception));
} catch (Throwable e) {
LOG.warn("Unable to wrap exception of type " +
clazz + ": it has no (String) constructor", e);
return exception;
}
}
}
| 3,535 | 34.36 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/HttpBodyContent.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.swift.http;
/**
* Response tuple from GET operations; combines the input stream with the content length
*/
public class HttpBodyContent {
private final long contentLength;
private final HttpInputStreamWithRelease inputStream;
/**
   * Build a body response.
   * @param inputStream input stream from the operation
* @param contentLength length of content; may be -1 for "don't know"
*/
public HttpBodyContent(HttpInputStreamWithRelease inputStream,
long contentLength) {
this.contentLength = contentLength;
this.inputStream = inputStream;
}
public long getContentLength() {
return contentLength;
}
public HttpInputStreamWithRelease getInputStream() {
return inputStream;
}
}
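/*
 * Consumption sketch (assuming HttpInputStreamWithRelease behaves as a
 * java.io.InputStream, which its name suggests but this file does not show):
 * read up to getContentLength() bytes -- or to end-of-stream when the length
 * is -1 -- then close the stream so the underlying connection is released.
 */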
| 1,579 | 33.347826 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestResourceUsageEmulators.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.StatusReporter;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.apache.hadoop.tools.rumen.ResourceUsageMetrics;
import org.apache.hadoop.mapred.gridmix.LoadJob.ResourceUsageMatcherRunner;
import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.CumulativeCpuUsageEmulatorPlugin;
import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.ResourceUsageEmulatorPlugin;
import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.ResourceUsageMatcher;
import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.CumulativeCpuUsageEmulatorPlugin.DefaultCpuUsageEmulator;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
/**
* Test Gridmix's resource emulator framework and supported plugins.
*/
public class TestResourceUsageEmulators {
/**
* A {@link ResourceUsageEmulatorPlugin} implementation for testing purpose.
* It essentially creates a file named 'test' in the test directory.
*/
static class TestResourceUsageEmulatorPlugin
implements ResourceUsageEmulatorPlugin {
static final Path rootTempDir =
new Path(System.getProperty("test.build.data", "/tmp"));
static final Path tempDir =
new Path(rootTempDir, "TestResourceUsageEmulatorPlugin");
static final String DEFAULT_IDENTIFIER = "test";
private Path touchPath = null;
private FileSystem fs = null;
@Override
public void emulate() throws IOException, InterruptedException {
// add some time between 2 calls to emulate()
try {
Thread.sleep(1000); // sleep for 1s
      } catch (Exception e) {
        // best-effort delay; interruptions are deliberately ignored
      }
try {
fs.delete(touchPath, false); // delete the touch file
//TODO Search for a better touch utility
fs.create(touchPath).close(); // recreate it
} catch (Exception e) {
throw new RuntimeException(e);
}
}
protected String getIdentifier() {
return DEFAULT_IDENTIFIER;
}
private static Path getFilePath(String id) {
return new Path(tempDir, id);
}
private static Path getInitFilePath(String id) {
return new Path(tempDir, id + ".init");
}
@Override
public void initialize(Configuration conf, ResourceUsageMetrics metrics,
ResourceCalculatorPlugin monitor, Progressive progress) {
// add some time between 2 calls to initialize()
try {
Thread.sleep(1000); // sleep for 1s
      } catch (Exception e) {
        // best-effort delay; interruptions are deliberately ignored
      }
try {
fs = FileSystem.getLocal(conf);
Path initPath = getInitFilePath(getIdentifier());
fs.delete(initPath, false); // delete the old file
fs.create(initPath).close(); // create a new one
touchPath = getFilePath(getIdentifier());
fs.delete(touchPath, false);
      } catch (Exception e) {
        // initialization failures are swallowed; the tests only inspect
        // the touch files afterwards
      } finally {
if (fs != null) {
try {
fs.deleteOnExit(tempDir);
          } catch (IOException ioe) {
            // cleanup registration is best-effort
          }
}
}
}
// test if the emulation framework successfully loaded this plugin
static long testInitialization(String id, Configuration conf)
throws IOException {
Path testPath = getInitFilePath(id);
FileSystem fs = FileSystem.getLocal(conf);
return fs.exists(testPath)
? fs.getFileStatus(testPath).getModificationTime()
: 0;
}
    // test if the emulation framework successfully invoked this plugin's
    // emulate()
static long testEmulation(String id, Configuration conf)
throws IOException {
Path testPath = getFilePath(id);
FileSystem fs = FileSystem.getLocal(conf);
return fs.exists(testPath)
? fs.getFileStatus(testPath).getModificationTime()
: 0;
}
@Override
public float getProgress() {
try {
return fs.exists(touchPath) ? 1.0f : 0f;
      } catch (IOException ioe) {
        // fall through to the default progress below
      }
return 0f;
}
}
/**
* Test implementation of {@link ResourceUsageEmulatorPlugin} which creates
* a file named 'others' in the test directory.
*/
static class TestOthers extends TestResourceUsageEmulatorPlugin {
static final String ID = "others";
@Override
protected String getIdentifier() {
return ID;
}
}
/**
* Test implementation of {@link ResourceUsageEmulatorPlugin} which creates
* a file named 'cpu' in the test directory.
*/
static class TestCpu extends TestResourceUsageEmulatorPlugin {
static final String ID = "cpu";
@Override
protected String getIdentifier() {
return ID;
}
}
/**
* Test {@link ResourceUsageMatcher}.
*/
@Test
public void testResourceUsageMatcher() throws Exception {
ResourceUsageMatcher matcher = new ResourceUsageMatcher();
Configuration conf = new Configuration();
conf.setClass(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS,
TestResourceUsageEmulatorPlugin.class,
ResourceUsageEmulatorPlugin.class);
long currentTime = System.currentTimeMillis();
matcher.configure(conf, null, null, null);
matcher.matchResourceUsage();
String id = TestResourceUsageEmulatorPlugin.DEFAULT_IDENTIFIER;
long result =
TestResourceUsageEmulatorPlugin.testInitialization(id, conf);
assertTrue("Resource usage matcher failed to initialize the configured"
+ " plugin", result > currentTime);
result = TestResourceUsageEmulatorPlugin.testEmulation(id, conf);
assertTrue("Resource usage matcher failed to load and emulate the"
+ " configured plugin", result > currentTime);
// test plugin order to first emulate cpu and then others
conf.setStrings(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS,
TestCpu.class.getName() + "," + TestOthers.class.getName());
matcher.configure(conf, null, null, null);
// test the initialization order
long time1 =
TestResourceUsageEmulatorPlugin.testInitialization(TestCpu.ID, conf);
long time2 =
TestResourceUsageEmulatorPlugin.testInitialization(TestOthers.ID,
conf);
assertTrue("Resource usage matcher failed to initialize the configured"
+ " plugins in order", time1 < time2);
matcher.matchResourceUsage();
// Note that the cpu usage emulator plugin is configured 1st and then the
// others plugin.
time1 =
TestResourceUsageEmulatorPlugin.testInitialization(TestCpu.ID, conf);
time2 =
TestResourceUsageEmulatorPlugin.testInitialization(TestOthers.ID,
conf);
assertTrue("Resource usage matcher failed to load the configured plugins",
time1 < time2);
}
/**
* Fakes the cumulative usage using {@link FakeCpuUsageEmulatorCore}.
*/
static class FakeResourceUsageMonitor extends DummyResourceCalculatorPlugin {
private FakeCpuUsageEmulatorCore core;
public FakeResourceUsageMonitor(FakeCpuUsageEmulatorCore core) {
this.core = core;
}
/**
* A dummy CPU usage monitor. Every call to
* {@link ResourceCalculatorPlugin#getCumulativeCpuTime()} will return the
     * value of {@link FakeCpuUsageEmulatorCore#getCpuUsage()}.
*/
@Override
public long getCumulativeCpuTime() {
return core.getCpuUsage();
}
}
/**
* A dummy {@link Progressive} implementation that allows users to set the
* progress for testing. The {@link Progressive#getProgress()} call will
* return the last progress value set using
* {@link FakeProgressive#setProgress(float)}.
*/
static class FakeProgressive implements Progressive {
private float progress = 0F;
@Override
public float getProgress() {
return progress;
}
void setProgress(float progress) {
this.progress = progress;
}
}
/**
* A dummy reporter for {@link LoadJob.ResourceUsageMatcherRunner}.
*/
private static class DummyReporter extends StatusReporter {
private Progressive progress;
DummyReporter(Progressive progress) {
this.progress = progress;
}
@Override
public org.apache.hadoop.mapreduce.Counter getCounter(Enum<?> name) {
return null;
}
@Override
public org.apache.hadoop.mapreduce.Counter getCounter(String group,
String name) {
return null;
}
@Override
public void progress() {
}
@Override
public float getProgress() {
return progress.getProgress();
}
@Override
public void setStatus(String status) {
}
}
// Extends ResourceUsageMatcherRunner for testing.
@SuppressWarnings("unchecked")
private static class FakeResourceUsageMatcherRunner
extends ResourceUsageMatcherRunner {
FakeResourceUsageMatcherRunner(TaskInputOutputContext context,
ResourceUsageMetrics metrics) {
super(context, metrics);
}
// test ResourceUsageMatcherRunner
void test() throws Exception {
super.match();
}
}
/**
* Test {@link LoadJob.ResourceUsageMatcherRunner}.
*/
@Test
@SuppressWarnings("unchecked")
public void testResourceUsageMatcherRunner() throws Exception {
Configuration conf = new Configuration();
FakeProgressive progress = new FakeProgressive();
// set the resource calculator plugin
conf.setClass(TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN,
DummyResourceCalculatorPlugin.class,
ResourceCalculatorPlugin.class);
// set the resources
// set the resource implementation class
conf.setClass(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS,
TestResourceUsageEmulatorPlugin.class,
ResourceUsageEmulatorPlugin.class);
long currentTime = System.currentTimeMillis();
// initialize the matcher class
TaskAttemptID id = new TaskAttemptID("test", 1, TaskType.MAP, 1, 1);
StatusReporter reporter = new DummyReporter(progress);
TaskInputOutputContext context =
new MapContextImpl(conf, id, null, null, null, reporter, null);
FakeResourceUsageMatcherRunner matcher =
new FakeResourceUsageMatcherRunner(context, null);
// check if the matcher initialized the plugin
String identifier = TestResourceUsageEmulatorPlugin.DEFAULT_IDENTIFIER;
long initTime =
TestResourceUsageEmulatorPlugin.testInitialization(identifier, conf);
assertTrue("ResourceUsageMatcherRunner failed to initialize the"
+ " configured plugin", initTime > currentTime);
// check the progress
assertEquals("Progress mismatch in ResourceUsageMatcherRunner",
0, progress.getProgress(), 0D);
// call match() and check progress
progress.setProgress(0.01f);
currentTime = System.currentTimeMillis();
matcher.test();
long emulateTime =
TestResourceUsageEmulatorPlugin.testEmulation(identifier, conf);
assertTrue("ProgressBasedResourceUsageMatcher failed to load and emulate"
+ " the configured plugin", emulateTime > currentTime);
}
/**
* Test {@link CumulativeCpuUsageEmulatorPlugin}'s core CPU usage emulation
* engine.
*/
@Test
public void testCpuUsageEmulator() throws IOException {
// test CpuUsageEmulator calibration with fake resource calculator plugin
long target = 100000L; // 100 secs
int unitUsage = 50;
FakeCpuUsageEmulatorCore fakeCpuEmulator = new FakeCpuUsageEmulatorCore();
fakeCpuEmulator.setUnitUsage(unitUsage);
FakeResourceUsageMonitor fakeMonitor =
new FakeResourceUsageMonitor(fakeCpuEmulator);
// calibrate for 100ms
fakeCpuEmulator.calibrate(fakeMonitor, target);
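    // the fake monitor advances 50 "ms" of CPU per performUnitComputation()
    // call here, so consuming the default 100ms calibration amount takes
    // exactly two calls, which the asserts below verify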
// by default, CpuUsageEmulator.calibrate() will consume 100ms of CPU usage
assertEquals("Fake calibration failed",
100, fakeMonitor.getCumulativeCpuTime());
assertEquals("Fake calibration failed",
100, fakeCpuEmulator.getCpuUsage());
// by default, CpuUsageEmulator.performUnitComputation() will be called
// twice
assertEquals("Fake calibration failed",
2, fakeCpuEmulator.getNumCalls());
}
/**
* This is a dummy class that fakes CPU usage.
*/
private static class FakeCpuUsageEmulatorCore
extends DefaultCpuUsageEmulator {
private int numCalls = 0;
private int unitUsage = 1;
private int cpuUsage = 0;
@Override
protected void performUnitComputation() {
++numCalls;
cpuUsage += unitUsage;
}
int getNumCalls() {
return numCalls;
}
int getCpuUsage() {
return cpuUsage;
}
void reset() {
numCalls = 0;
cpuUsage = 0;
}
void setUnitUsage(int unitUsage) {
this.unitUsage = unitUsage;
}
}
// Creates a ResourceUsageMetrics object from the target usage
static ResourceUsageMetrics createMetrics(long target) {
ResourceUsageMetrics metrics = new ResourceUsageMetrics();
metrics.setCumulativeCpuUsage(target);
metrics.setVirtualMemoryUsage(target);
metrics.setPhysicalMemoryUsage(target);
metrics.setHeapUsage(target);
return metrics;
}
/**
* Test {@link CumulativeCpuUsageEmulatorPlugin}.
*/
@Test
public void testCumulativeCpuUsageEmulatorPlugin() throws Exception {
Configuration conf = new Configuration();
long targetCpuUsage = 1000L;
int unitCpuUsage = 50;
// fake progress indicator
FakeProgressive fakeProgress = new FakeProgressive();
// fake cpu usage generator
FakeCpuUsageEmulatorCore fakeCore = new FakeCpuUsageEmulatorCore();
fakeCore.setUnitUsage(unitCpuUsage);
// a cumulative cpu usage emulator with fake core
CumulativeCpuUsageEmulatorPlugin cpuPlugin =
new CumulativeCpuUsageEmulatorPlugin(fakeCore);
// test with invalid or missing resource usage value
ResourceUsageMetrics invalidUsage = createMetrics(0);
cpuPlugin.initialize(conf, invalidUsage, null, null);
// test if disabled cpu emulation plugin's emulate() call is a no-operation
// this will test if the emulation plugin is disabled or not
int numCallsPre = fakeCore.getNumCalls();
long cpuUsagePre = fakeCore.getCpuUsage();
cpuPlugin.emulate();
int numCallsPost = fakeCore.getNumCalls();
long cpuUsagePost = fakeCore.getCpuUsage();
    // test that no calls are made to the cpu usage emulator core
assertEquals("Disabled cumulative CPU usage emulation plugin works!",
numCallsPre, numCallsPost);
    // test that no cpu usage is accrued by the emulator core
assertEquals("Disabled cumulative CPU usage emulation plugin works!",
cpuUsagePre, cpuUsagePost);
// test with get progress
float progress = cpuPlugin.getProgress();
assertEquals("Invalid progress of disabled cumulative CPU usage emulation "
+ "plugin!", 1.0f, progress, 0f);
// test with valid resource usage value
ResourceUsageMetrics metrics = createMetrics(targetCpuUsage);
// fake monitor
ResourceCalculatorPlugin monitor = new FakeResourceUsageMonitor(fakeCore);
// test with default emulation interval
testEmulationAccuracy(conf, fakeCore, monitor, metrics, cpuPlugin,
targetCpuUsage, targetCpuUsage / unitCpuUsage);
// test with custom value for emulation interval of 20%
conf.setFloat(CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL,
0.2F);
testEmulationAccuracy(conf, fakeCore, monitor, metrics, cpuPlugin,
targetCpuUsage, targetCpuUsage / unitCpuUsage);
// test if emulation interval boundary is respected (unit usage = 1)
// test the case where the current progress is less than threshold
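    // The plugin weights the target by progress^4: expected usage at
    // progress p is roughly targetCpuUsage * p^4, but never less than what
    // has already been consumed; only at 100% progress does it ramp up to
    // the full target. The arithmetic in the step comments below follows
    // from this.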
fakeProgress = new FakeProgressive(); // initialize
fakeCore.reset();
fakeCore.setUnitUsage(1);
conf.setFloat(CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL,
0.25F);
cpuPlugin.initialize(conf, metrics, monitor, fakeProgress);
// take a snapshot after the initialization
long initCpuUsage = monitor.getCumulativeCpuTime();
long initNumCalls = fakeCore.getNumCalls();
// test with 0 progress
testEmulationBoundary(0F, fakeCore, fakeProgress, cpuPlugin, initCpuUsage,
initNumCalls, "[no-op, 0 progress]");
// test with 24% progress
testEmulationBoundary(0.24F, fakeCore, fakeProgress, cpuPlugin,
initCpuUsage, initNumCalls, "[no-op, 24% progress]");
// test with 25% progress
// target = 1000ms, target emulation at 25% = 250ms,
    // weighted target = 1000 * 0.25^4 (we are using progress^4 as the weight)
// ~ 4
// but current usage = init-usage = 100, hence expected = 100
testEmulationBoundary(0.25F, fakeCore, fakeProgress, cpuPlugin,
initCpuUsage, initNumCalls, "[op, 25% progress]");
// test with 80% progress
// target = 1000ms, target emulation at 80% = 800ms,
    // weighted target = 1000 * 0.80^4 (we are using progress^4 as the weight)
// ~ 410
// current-usage = init-usage = 100, hence expected-usage = 410
testEmulationBoundary(0.80F, fakeCore, fakeProgress, cpuPlugin, 410, 410,
"[op, 80% progress]");
// now test if the final call with 100% progress ramps up the CPU usage
testEmulationBoundary(1F, fakeCore, fakeProgress, cpuPlugin, targetCpuUsage,
targetCpuUsage, "[op, 100% progress]");
// test if emulation interval boundary is respected (unit usage = 50)
// test the case where the current progress is less than threshold
fakeProgress = new FakeProgressive(); // initialize
fakeCore.reset();
fakeCore.setUnitUsage(unitCpuUsage);
conf.setFloat(CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL,
0.40F);
cpuPlugin.initialize(conf, metrics, monitor, fakeProgress);
// take a snapshot after the initialization
initCpuUsage = monitor.getCumulativeCpuTime();
initNumCalls = fakeCore.getNumCalls();
// test with 0 progress
testEmulationBoundary(0F, fakeCore, fakeProgress, cpuPlugin, initCpuUsage,
initNumCalls, "[no-op, 0 progress]");
// test with 39% progress
testEmulationBoundary(0.39F, fakeCore, fakeProgress, cpuPlugin,
initCpuUsage, initNumCalls, "[no-op, 39% progress]");
// test with 40% progress
    // target = 1000ms, target emulation at 40% = 400ms,
    // weighted target = 1000 * 0.40^4 (we are using progress^4 as the weight)
// ~ 26
// current-usage = init-usage = 100, hence expected-usage = 100
testEmulationBoundary(0.40F, fakeCore, fakeProgress, cpuPlugin,
initCpuUsage, initNumCalls, "[op, 40% progress]");
// test with 90% progress
// target = 1000ms, target emulation at 90% = 900ms,
    // weighted target = 1000 * 0.90^4 (we are using progress^4 as the weight)
// ~ 657
// current-usage = init-usage = 100, hence expected-usage = 657 but
// the fake-core increases in steps of 50, hence final target = 700
testEmulationBoundary(0.90F, fakeCore, fakeProgress, cpuPlugin, 700,
700 / unitCpuUsage, "[op, 90% progress]");
// now test if the final call with 100% progress ramps up the CPU usage
testEmulationBoundary(1F, fakeCore, fakeProgress, cpuPlugin, targetCpuUsage,
targetCpuUsage / unitCpuUsage, "[op, 100% progress]");
}
  // test whether the CPU usage emulator achieves the desired target using
  // the expected number of calls to the underlying core engine.
private static void testEmulationAccuracy(Configuration conf,
FakeCpuUsageEmulatorCore fakeCore,
ResourceCalculatorPlugin monitor,
ResourceUsageMetrics metrics,
CumulativeCpuUsageEmulatorPlugin cpuPlugin,
long expectedTotalCpuUsage, long expectedTotalNumCalls)
throws Exception {
FakeProgressive fakeProgress = new FakeProgressive();
fakeCore.reset();
cpuPlugin.initialize(conf, metrics, monitor, fakeProgress);
int numLoops = 0;
while (fakeProgress.getProgress() < 1) {
++numLoops;
float progress = (float)numLoops / 100;
fakeProgress.setProgress(progress);
cpuPlugin.emulate();
}
// test if the resource plugin shows the expected invocations
assertEquals("Cumulative cpu usage emulator plugin failed (num calls)!",
expectedTotalNumCalls, fakeCore.getNumCalls(), 0L);
// test if the resource plugin shows the expected usage
assertEquals("Cumulative cpu usage emulator plugin failed (total usage)!",
expectedTotalCpuUsage, fakeCore.getCpuUsage(), 0L);
}
// tests if the CPU usage emulation plugin emulates only at the expected
// progress gaps
private static void testEmulationBoundary(float progress,
FakeCpuUsageEmulatorCore fakeCore, FakeProgressive fakeProgress,
CumulativeCpuUsageEmulatorPlugin cpuPlugin, long expectedTotalCpuUsage,
long expectedTotalNumCalls, String info) throws Exception {
fakeProgress.setProgress(progress);
cpuPlugin.emulate();
assertEquals("Emulation interval test for cpu usage failed " + info + "!",
expectedTotalCpuUsage, fakeCore.getCpuUsage(), 0L);
assertEquals("Emulation interval test for num calls failed " + info + "!",
expectedTotalNumCalls, fakeCore.getNumCalls(), 0L);
}
}
| 23,368 | 36.998374 | 121 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestUserResolve.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import java.net.URI;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
public class TestUserResolve {
private static Path rootDir = null;
private static Configuration conf = null;
private static FileSystem fs = null;
@BeforeClass
public static void createRootDir() throws IOException {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
rootDir = new Path(new Path(System.getProperty("test.build.data", "/tmp"))
.makeQualified(fs), "gridmixUserResolve");
}
/**
* Creates users file with the content as the String usersFileContent.
* @param usersFilePath the path to the file that is to be created
* @param usersFileContent Content of users file
* @throws IOException
*/
private static void writeUserList(Path usersFilePath, String usersFileContent)
throws IOException {
FSDataOutputStream out = null;
try {
out = fs.create(usersFilePath, true);
out.writeBytes(usersFileContent);
} finally {
if (out != null) {
out.close();
}
}
}
/**
* Validate RoundRobinUserResolver's behavior for bad user resource file.
* RoundRobinUserResolver.setTargetUsers() should throw proper Exception for
* the cases like
* <li> non existent user resource file and
* <li> empty user resource file
*
* @param rslv The RoundRobinUserResolver object
* @param userRsrc users file
* @param expectedErrorMsg expected error message
*/
private void validateBadUsersFile(UserResolver rslv, URI userRsrc,
String expectedErrorMsg) {
boolean fail = false;
try {
rslv.setTargetUsers(userRsrc, conf);
} catch (IOException e) {
assertTrue("Exception message from RoundRobinUserResolver is wrong",
e.getMessage().equals(expectedErrorMsg));
fail = true;
}
assertTrue("User list required for RoundRobinUserResolver", fail);
}
/**
* Validate the behavior of {@link RoundRobinUserResolver} for different
* user resource files like
* <li> Empty user resource file
* <li> Non existent user resource file
* <li> User resource file with valid content
* @throws Exception
*/
@Test
public void testRoundRobinResolver() throws Exception {
final UserResolver rslv = new RoundRobinUserResolver();
Path usersFilePath = new Path(rootDir, "users");
URI userRsrc = new URI(usersFilePath.toString());
// Check if the error message is as expected for non existent
// user resource file.
fs.delete(usersFilePath, false);
String expectedErrorMsg = "File " + userRsrc + " does not exist";
validateBadUsersFile(rslv, userRsrc, expectedErrorMsg);
// Check if the error message is as expected for empty user resource file
writeUserList(usersFilePath, "");// creates empty users file
expectedErrorMsg =
RoundRobinUserResolver.buildEmptyUsersErrorMsg(userRsrc);
validateBadUsersFile(rslv, userRsrc, expectedErrorMsg);
// Create user resource file with valid content like older users list file
// with usernames and groups
writeUserList(usersFilePath,
"user0,groupA,groupB,groupC\nuser1,groupA,groupC\n");
validateValidUsersFile(rslv, userRsrc);
// Create user resource file with valid content with
// usernames with groups and without groups
writeUserList(usersFilePath, "user0,groupA,groupB\nuser1,");
validateValidUsersFile(rslv, userRsrc);
// Create user resource file with valid content with
// usernames without groups
writeUserList(usersFilePath, "user0\nuser1");
validateValidUsersFile(rslv, userRsrc);
}
// Validate RoundRobinUserResolver for the case of
// user resource file with valid content.
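  // The mapping asserted below is round-robin over first-seen submitters:
  // hfre0->user0, hfre1->user1, hfre2->user0, ... and a repeated submitter
  // always keeps its original mapping.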
private void validateValidUsersFile(UserResolver rslv, URI userRsrc)
throws IOException {
assertTrue(rslv.setTargetUsers(userRsrc, conf));
UserGroupInformation ugi1 = UserGroupInformation.createRemoteUser("hfre0");
assertEquals("user0", rslv.getTargetUgi(ugi1).getUserName());
assertEquals("user1",
rslv.getTargetUgi(UserGroupInformation.createRemoteUser("hfre1"))
.getUserName());
assertEquals("user0",
rslv.getTargetUgi(UserGroupInformation.createRemoteUser("hfre2"))
.getUserName());
assertEquals("user0", rslv.getTargetUgi(ugi1).getUserName());
assertEquals("user1",
rslv.getTargetUgi(UserGroupInformation.createRemoteUser("hfre3"))
.getUserName());
// Verify if same user comes again, its mapped user name should be
// correct even though UGI is constructed again.
assertEquals("user0", rslv.getTargetUgi(
UserGroupInformation.createRemoteUser("hfre0")).getUserName());
assertEquals("user0",
rslv.getTargetUgi(UserGroupInformation.createRemoteUser("hfre5"))
.getUserName());
assertEquals("user0",
rslv.getTargetUgi(UserGroupInformation.createRemoteUser("hfre0"))
.getUserName());
}
@Test
public void testSubmitterResolver() throws Exception {
final UserResolver rslv = new SubmitterUserResolver();
assertFalse(rslv.needsTargetUsersList());
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
assertEquals(ugi, rslv.getTargetUgi((UserGroupInformation)null));
}
}
| 6,452 | 36.300578 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestRecordFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.util.Random;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataOutputBuffer;
public class TestRecordFactory {
private static final Log LOG = LogFactory.getLog(TestRecordFactory.class);
public static void testFactory(long targetBytes, long targetRecs)
throws Exception {
final Configuration conf = new Configuration();
final GridmixKey key = new GridmixKey();
final GridmixRecord val = new GridmixRecord();
LOG.info("Target bytes/records: " + targetBytes + "/" + targetRecs);
final RecordFactory f = new AvgRecordFactory(targetBytes, targetRecs, conf);
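    // When no record count is requested (targetRecs <= 0), the expected
    // count is derived from the byte budget and the configured average
    // record size (default 64 KB), never fewer than one record -- mirroring
    // what AvgRecordFactory does internally.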
targetRecs = targetRecs <= 0 && targetBytes >= 0
? Math.max(1,
targetBytes
/ conf.getInt(AvgRecordFactory.GRIDMIX_MISSING_REC_SIZE,
64 * 1024))
: targetRecs;
long records = 0L;
final DataOutputBuffer out = new DataOutputBuffer();
while (f.next(key, val)) {
++records;
key.write(out);
val.write(out);
}
assertEquals(targetRecs, records);
assertEquals(targetBytes, out.getLength());
}
@Test
public void testRandom() throws Exception {
final Random r = new Random();
final long targetBytes = r.nextInt(1 << 20) + 3 * (1 << 14);
final long targetRecs = r.nextInt(1 << 14);
testFactory(targetBytes, targetRecs);
}
@Test
public void testAvg() throws Exception {
final Random r = new Random();
final long avgsize = r.nextInt(1 << 10) + 1;
final long targetRecs = r.nextInt(1 << 14);
testFactory(targetRecs * avgsize, targetRecs);
}
@Test
public void testZero() throws Exception {
final Random r = new Random();
final long targetBytes = r.nextInt(1 << 20);
testFactory(targetBytes, 0);
}
}
| 2,793 | 33.073171 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestSleepJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.rumen.JobStory;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.util.List;
import static org.junit.Assert.*;
public class TestSleepJob extends CommonJobTest {
public static final Log LOG = LogFactory.getLog(Gridmix.class);
static {
((Log4JLogger) LogFactory.getLog("org.apache.hadoop.mapred.gridmix"))
.getLogger().setLevel(Level.DEBUG);
}
static GridmixJobSubmissionPolicy policy = GridmixJobSubmissionPolicy.REPLAY;
@BeforeClass
public static void init() throws IOException {
GridmixTestUtils.initCluster(TestSleepJob.class);
}
@AfterClass
public static void shutDown() throws IOException {
GridmixTestUtils.shutdownCluster();
}
@Test (timeout=600000)
public void testMapTasksOnlySleepJobs() throws Exception {
Configuration configuration = GridmixTestUtils.mrvl.getConfig();
DebugJobProducer jobProducer = new DebugJobProducer(5, configuration);
configuration.setBoolean(SleepJob.SLEEPJOB_MAPTASK_ONLY, true);
UserGroupInformation ugi = UserGroupInformation.getLoginUser();
JobStory story;
int seq = 1;
while ((story = jobProducer.getNextJob()) != null) {
GridmixJob gridmixJob = JobCreator.SLEEPJOB.createGridmixJob(configuration, 0,
story, new Path("ignored"), ugi, seq++);
gridmixJob.buildSplits(null);
Job job = gridmixJob.call();
assertEquals(0, job.getNumReduceTasks());
}
jobProducer.close();
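    // seq started at 1 and was incremented once per produced job, so five
    // jobs leave it at 6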
assertEquals(6, seq);
}
/*
* test RandomLocation
*/
@Test (timeout=600000)
public void testRandomLocation() throws Exception {
UserGroupInformation ugi = UserGroupInformation.getLoginUser();
testRandomLocation(1, 10, ugi);
testRandomLocation(2, 10, ugi);
}
// test Serial submit
@Test (timeout=600000)
public void testSerialSubmit() throws Exception {
// set policy
policy = GridmixJobSubmissionPolicy.SERIAL;
LOG.info("Serial started at " + System.currentTimeMillis());
doSubmission(JobCreator.SLEEPJOB.name(), false);
LOG.info("Serial ended at " + System.currentTimeMillis());
}
@Test (timeout=600000)
public void testReplaySubmit() throws Exception {
policy = GridmixJobSubmissionPolicy.REPLAY;
LOG.info(" Replay started at " + System.currentTimeMillis());
doSubmission(JobCreator.SLEEPJOB.name(), false);
LOG.info(" Replay ended at " + System.currentTimeMillis());
}
@Test (timeout=600000)
public void testStressSubmit() throws Exception {
policy = GridmixJobSubmissionPolicy.STRESS;
LOG.info(" Replay started at " + System.currentTimeMillis());
doSubmission(JobCreator.SLEEPJOB.name(), false);
LOG.info(" Replay ended at " + System.currentTimeMillis());
}
private void testRandomLocation(int locations, int njobs,
UserGroupInformation ugi) throws Exception {
Configuration configuration = new Configuration();
DebugJobProducer jobProducer = new DebugJobProducer(njobs, configuration);
Configuration jconf = GridmixTestUtils.mrvl.getConfig();
jconf.setInt(JobCreator.SLEEPJOB_RANDOM_LOCATIONS, locations);
JobStory story;
int seq = 1;
while ((story = jobProducer.getNextJob()) != null) {
GridmixJob gridmixJob = JobCreator.SLEEPJOB.createGridmixJob(jconf, 0,
story, new Path("ignored"), ugi, seq++);
gridmixJob.buildSplits(null);
List<InputSplit> splits = new SleepJob.SleepInputFormat()
.getSplits(gridmixJob.getJob());
for (InputSplit split : splits) {
assertEquals(locations, split.getLocations().length);
}
}
jobProducer.close();
}
}
| 4,986 | 33.874126 | 84 |
java
|