repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/StatePool.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.state;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.HashMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.rumen.Anonymizer;
import org.apache.hadoop.tools.rumen.datatypes.DataType;
import org.codehaus.jackson.JsonEncoding;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.Version;
import org.codehaus.jackson.annotate.JsonIgnore;
import org.codehaus.jackson.map.DeserializationConfig;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.SerializationConfig;
import org.codehaus.jackson.map.module.SimpleModule;
/**
* A pool of states. States used by {@link DataType}s can be managed using the
* {@link StatePool}. The {@link StatePool} also supports persistence, which is
* key to sharing states across multiple {@link Anonymizer} runs.
*/
@SuppressWarnings("unchecked")
public class StatePool {
private static final long VERSION = 1L;
private boolean isUpdated = false;
private boolean isInitialized = false;
private Configuration conf;
// persistence configuration
public static final String DIR_CONFIG = "rumen.anonymization.states.dir";
public static final String RELOAD_CONFIG =
"rumen.anonymization.states.reload";
public static final String PERSIST_CONFIG =
"rumen.anonymization.states.persist";
// internal state management configs
private static final String COMMIT_STATE_FILENAME = "latest";
private static final String CURRENT_STATE_FILENAME = "temp";
private String timeStamp;
private Path persistDirPath;
private boolean reload;
private boolean persist;
/**
* A wrapper class that binds the state implementation to its implementing
* class name.
*/
public static class StatePair {
private String className;
private State state;
public StatePair(State state) {
this.className = state.getClass().getName();
this.state = state;
}
public String getClassName() {
return className;
}
public void setClassName(String className) {
this.className = className;
}
public State getState() {
return state;
}
public void setState(State state) {
this.state = state;
}
}
/**
* A map used to identify and cache {@link State}s, keyed by class name.
*/
private HashMap<String, StatePair> pool = new HashMap<String, StatePair>();
public void addState(Class id, State state) {
if (pool.containsKey(id.getName())) {
throw new RuntimeException("State '" + state.getName() + "' added for the"
+ " class " + id.getName() + " already exists!");
}
isUpdated = true;
pool.put(id.getName(), new StatePair(state));
}
public State getState(Class clazz) {
return pool.containsKey(clazz.getName())
? pool.get(clazz.getName()).getState()
: null;
}
// For testing
@JsonIgnore
public boolean isUpdated() {
if (!isUpdated) {
for (StatePair statePair : pool.values()) {
// if one of the states has changed, then the pool is dirty
if (statePair.getState().isUpdated()) {
isUpdated = true;
return true;
}
}
}
return isUpdated;
}
/**
* Initializes the {@link StatePool}. This API also reloads any previously
* persisted state. Note that the {@link StatePool} should be initialized only
* once.
*/
public void initialize(Configuration conf) throws Exception {
if (isInitialized) {
throw new RuntimeException("StatePool is already initialized!");
}
this.conf = conf;
String persistDir = conf.get(DIR_CONFIG);
reload = conf.getBoolean(RELOAD_CONFIG, false);
persist = conf.getBoolean(PERSIST_CONFIG, false);
// reload if configured
if (reload || persist) {
System.out.println("State Manager initializing. State directory : "
+ persistDir);
System.out.println("Reload:" + reload + " Persist:" + persist);
if (persistDir == null) {
throw new RuntimeException("No state persist directory configured!"
+ " Disable persistence.");
} else {
this.persistDirPath = new Path(persistDir);
}
} else {
System.out.println("State Manager disabled.");
}
// reload
reload();
// now set the timestamp
DateFormat formatter =
new SimpleDateFormat("dd-MMM-yyyy-hh'H'-mm'M'-ss'S'");
Calendar calendar = Calendar.getInstance();
calendar.setTimeInMillis(System.currentTimeMillis());
timeStamp = formatter.format(calendar.getTime());
isInitialized = true;
}
private void reload() throws Exception {
if (reload) {
// Reload persisted entries
Path stateFilename = new Path(persistDirPath, COMMIT_STATE_FILENAME);
FileSystem fs = stateFilename.getFileSystem(conf);
if (fs.exists(stateFilename)) {
reloadState(stateFilename, conf);
} else {
throw new RuntimeException("No latest state persist directory found!"
+ " Disable persistence and run.");
}
}
}
private void reloadState(Path stateFile, Configuration conf)
throws Exception {
FileSystem fs = stateFile.getFileSystem(conf);
if (fs.exists(stateFile)) {
System.out.println("Reading state from " + stateFile.toString());
FSDataInputStream in = fs.open(stateFile);
read(in);
in.close();
} else {
System.out.println("No state information found for " + stateFile);
}
}
private void read(DataInput in) throws IOException {
ObjectMapper mapper = new ObjectMapper();
mapper.configure(
DeserializationConfig.Feature.CAN_OVERRIDE_ACCESS_MODIFIERS, true);
// define a module
SimpleModule module = new SimpleModule("State Serializer",
new Version(0, 1, 1, "FINAL"));
// add the state deserializer
module.addDeserializer(StatePair.class, new StateDeserializer());
// register the module with the object-mapper
mapper.registerModule(module);
JsonParser parser =
mapper.getJsonFactory().createJsonParser((DataInputStream)in);
StatePool statePool = mapper.readValue(parser, StatePool.class);
this.setStates(statePool.getStates());
parser.close();
}
/**
* Persists the current state to the state directory. The state will be
* persisted to the 'latest' file in the state directory.
*/
public void persist() throws IOException {
if (!persist) {
return;
}
if (isUpdated()) {
System.out.println("State is updated! Committing.");
Path currStateFile = new Path(persistDirPath, CURRENT_STATE_FILENAME);
Path commitStateFile = new Path(persistDirPath, COMMIT_STATE_FILENAME);
FileSystem fs = currStateFile.getFileSystem(conf);
System.out.println("Starting the persist phase. Persisting to "
+ currStateFile.toString());
// persist current state
// write the contents of the current state to the current(temp) directory
FSDataOutputStream out = fs.create(currStateFile, true);
write(out);
out.close();
System.out.println("Persist phase over. The best known un-committed state"
+ " is located at " + currStateFile.toString());
// commit (phase-1)
// copy the previous commit file to the relocation file
if (fs.exists(commitStateFile)) {
Path commitRelocationFile = new Path(persistDirPath, timeStamp);
System.out.println("Starting the pre-commit phase. Moving the previous "
+ "best known state to " + commitRelocationFile.toString());
// copy the commit file to the relocation file
FileUtil.copy(fs,commitStateFile, fs, commitRelocationFile, false,
conf);
}
// commit (phase-2)
System.out.println("Starting the commit phase. Committing the states in "
+ currStateFile.toString());
FileUtil.copy(fs, currStateFile, fs, commitStateFile, true, true, conf);
System.out.println("Commit phase successful! The best known committed "
+ "state is located at " + commitStateFile.toString());
} else {
System.out.println("State not updated! No commit required.");
}
}
private void write(DataOutput out) throws IOException {
// This is just a JSON experiment
System.out.println("Dumping the StatePool's in JSON format.");
ObjectMapper outMapper = new ObjectMapper();
outMapper.configure(
SerializationConfig.Feature.CAN_OVERRIDE_ACCESS_MODIFIERS, true);
// define a module
SimpleModule module = new SimpleModule("State Serializer",
new Version(0, 1, 1, "FINAL"));
// add the state serializer
//module.addSerializer(State.class, new StateSerializer());
// register the module with the object-mapper
outMapper.registerModule(module);
JsonFactory outFactory = outMapper.getJsonFactory();
JsonGenerator jGen =
outFactory.createJsonGenerator((DataOutputStream)out, JsonEncoding.UTF8);
jGen.useDefaultPrettyPrinter();
jGen.writeObject(this);
jGen.close();
}
/**
* Getters and setters for JSON serialization
*/
/**
* To be invoked only by the Jackson JSON serializer.
*/
public long getVersion() {
return VERSION;
}
/**
* To be invoked only by the Jackson JSON deserializer.
*/
public void setVersion(long version) {
if (version != VERSION) {
throw new RuntimeException("Version mismatch! Expected " + VERSION
+ " got " + version);
}
}
/**
* To be invoked only by the Jackson JSON serializer.
*/
public HashMap<String, StatePair> getStates() {
return pool;
}
/**
* To be invoked only by the Jackson JSON deserializer.
*/
public void setStates(HashMap<String, StatePair> states) {
if (pool.size() > 0) {
throw new RuntimeException("Pool not empty!");
}
//TODO Should we do a clone?
this.pool = states;
}
}
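// --- Hedged usage sketch (not part of the original Hadoop source) ---
// A minimal illustration of the StatePool lifecycle described above:
// initialize(conf), lazy addState()/getState() during anonymization, then
// persist(). The state directory below is a made-up example path.
class StatePoolUsageSketch {
  static void demo() throws Exception {
    Configuration conf = new Configuration();
    conf.set(StatePool.DIR_CONFIG, "/tmp/rumen-states");  // hypothetical dir
    conf.setBoolean(StatePool.PERSIST_CONFIG, true);      // commit on persist()
    conf.setBoolean(StatePool.RELOAD_CONFIG, false);      // no previous state
    StatePool pool = new StatePool();
    pool.initialize(conf);  // would reload the 'latest' file if reload=true
    // Data-types register their states lazily via addState()/getState() while
    // they are being anonymized; nothing needs to be added up-front here.
    pool.persist();         // writes 'temp', then commits it to 'latest'
  }
}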
| 11,496 | 32.324638 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/NodeName.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.datatypes;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.rumen.ParsedHost;
import org.apache.hadoop.tools.rumen.anonymization.WordList;
import org.apache.hadoop.tools.rumen.state.State;
import org.apache.hadoop.tools.rumen.state.StatePool;
import org.codehaus.jackson.annotate.JsonIgnore;
/**
* Represents the cluster host.
*/
public class NodeName implements AnonymizableDataType<String> {
private String hostName;
private String rackName;
private String nodeName;
private String anonymizedNodeName;
public static final NodeName ROOT = new NodeName("<root>");
/**
* A composite state for node-name.
*/
public static class NodeNameState implements State {
private WordList rackNameState = new WordList("rack");
private WordList hostNameState = new WordList("host");
@Override
@JsonIgnore
public boolean isUpdated() {
return rackNameState.isUpdated() || hostNameState.isUpdated();
}
public WordList getRackNameState() {
return rackNameState;
}
public WordList getHostNameState() {
return hostNameState;
}
public void setRackNameState(WordList state) {
this.rackNameState = state;
}
public void setHostNameState(WordList state) {
this.hostNameState = state;
}
@Override
public String getName() {
return "node";
}
@Override
public void setName(String name) {
// for now, simply assert since this class has a hardcoded name
if (!getName().equals(name)) {
throw new RuntimeException("State name mismatch! Expected '"
+ getName() + "' but found '" + name + "'.");
}
}
}
public NodeName(String nodeName) {
this.nodeName = nodeName;
ParsedHost pHost = ParsedHost.parse(nodeName);
if (pHost == null) {
this.rackName = null;
this.hostName = nodeName;
} else {
//TODO check for null and improve .. possibly call NodeName(r,h)
this.rackName = pHost.getRackName();
this.hostName = pHost.getNodeName();
}
}
public NodeName(String rName, String hName) {
rName = (rName == null || rName.length() == 0) ? null : rName;
hName = (hName == null || hName.length() == 0) ? null : hName;
if (hName == null) {
nodeName = rName;
rackName = rName;
} else if (rName == null) {
nodeName = hName;
ParsedHost pHost = ParsedHost.parse(nodeName);
if (pHost == null) {
this.rackName = null;
this.hostName = hName;
} else {
this.rackName = pHost.getRackName();
this.hostName = pHost.getNodeName();
}
} else {
rackName = rName;
this.hostName = hName;
this.nodeName = "/" + rName + "/" + hName;
}
}
public String getHostName() {
return hostName;
}
public String getRackName() {
return rackName;
}
@Override
public String getValue() {
return nodeName;
}
@Override
public String getAnonymizedValue(StatePool statePool, Configuration conf) {
if (this.getValue().equals(ROOT.getValue())) {
return getValue();
}
if (anonymizedNodeName == null) {
anonymize(statePool);
}
return anonymizedNodeName;
}
private void anonymize(StatePool pool) {
StringBuffer buf = new StringBuffer();
NodeNameState state = (NodeNameState) pool.getState(getClass());
if (state == null) {
state = new NodeNameState();
pool.addState(getClass(), state);
}
if (rackName != null && hostName != null) {
buf.append('/');
buf.append(anonymize(rackName, state.getRackNameState()));
buf.append('/');
buf.append(anonymize(hostName, state.getHostNameState()));
} else {
if (state.getRackNameState().contains(nodeName) || rackName != null) {
buf.append(anonymize(nodeName, state.getRackNameState()));
} else {
buf.append(anonymize(nodeName, state.getHostNameState()));
}
}
anonymizedNodeName = buf.toString();
}
//TODO There is no caching for saving memory.
private static String anonymize(String data, WordList wordList) {
if (data == null) {
return null;
}
if (!wordList.contains(data)) {
wordList.add(data);
}
return wordList.getName() + wordList.indexOf(data);
}
}
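// --- Hedged usage sketch (not part of the original Hadoop source) ---
// Illustrates how NodeName anonymization is expected to behave given the code
// above; the rack/host values are made up. Rack and host components are
// replaced with indexed labels drawn from the shared WordLists, so the same
// host always maps to the same label within a run.
class NodeNameUsageSketch {
  static void demo() {
    StatePool pool = new StatePool();
    Configuration conf = new Configuration();
    NodeName node = new NodeName("rack-17", "node-42.example.com");
    // First use registers both components, yielding "/rack0/host0";
    // a second node on the same rack would become "/rack0/host1".
    String anonymized = node.getAnonymizedValue(pool, conf);
  }
}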
| 5,229 | 28.548023 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/DefaultAnonymizableDataType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.datatypes;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.rumen.anonymization.WordList;
import org.apache.hadoop.tools.rumen.anonymization.WordListAnonymizerUtility;
import org.apache.hadoop.tools.rumen.state.StatePool;
/**
* Represents a default anonymizable Rumen data-type. It uses
* {@link WordListAnonymizerUtility} for anonymization.
*/
public abstract class DefaultAnonymizableDataType
implements AnonymizableDataType<String> {
private static final String DEFAULT_PREFIX = "data";
protected String getPrefix() {
return DEFAULT_PREFIX;
}
// Determines if the contained data needs anonymization
protected boolean needsAnonymization(Configuration conf) {
return true;
}
@Override
public final String getAnonymizedValue(StatePool statePool,
Configuration conf) {
if (needsAnonymization(conf)) {
WordList state = (WordList) statePool.getState(getClass());
if (state == null) {
state = new WordList(getPrefix());
statePool.addState(getClass(), state);
}
return anonymize(getValue(), state);
} else {
return getValue();
}
}
private static String anonymize(String data, WordList wordList) {
if (data == null) {
return null;
}
if (!wordList.contains(data)) {
wordList.add(data);
}
return wordList.getName() + wordList.indexOf(data);
}
}
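// --- Hedged sketch (not part of the original Hadoop source) ---
// A minimal subclass illustrating the contract above: getValue() supplies the
// raw string and getPrefix() the label used in the anonymized form
// "<prefix><index>". With the default prefix the output looks like "data0",
// "data1", ...; concrete types such as UserName/JobName override getPrefix().
class ExampleAnonymizableString extends DefaultAnonymizableDataType {
  private final String value;
  ExampleAnonymizableString(String value) {
    this.value = value;
  }
  @Override
  public String getValue() {
    return value;
  }
  // getPrefix() is inherited, so anonymized values use the "data" prefix.
}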
| 2,290 | 33.19403 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/DefaultDataType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.datatypes;
/**
* This represents the default Java data-types (like int, long, float, etc.).
*/
public class DefaultDataType implements DataType<String> {
private String value;
public DefaultDataType(String value) {
this.value = value;
}
/**
* Get the value of the attribute.
*/
@Override
public String getValue() {
return value;
}
}
| 1,212 | 31.783784 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/JobProperties.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.datatypes;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.rumen.datatypes.util.JobPropertyParser;
import org.apache.hadoop.tools.rumen.datatypes.util.MapReduceJobPropertiesParser;
import org.apache.hadoop.tools.rumen.state.StatePool;
import org.apache.hadoop.util.ReflectionUtils;
/**
* This represents the job configuration properties.
*/
public class JobProperties implements AnonymizableDataType<Properties> {
public static final String PARSERS_CONFIG_KEY =
"rumen.datatypes.jobproperties.parsers";
private final Properties jobProperties;
public JobProperties() {
this(new Properties());
}
public JobProperties(Properties properties) {
this.jobProperties = properties;
}
public Properties getValue() {
return jobProperties;
}
@Override
public Properties getAnonymizedValue(StatePool statePool,
Configuration conf) {
Properties filteredProperties = null;
List<JobPropertyParser> pList = new ArrayList<JobPropertyParser>(1);
// load the parsers
String config = conf.get(PARSERS_CONFIG_KEY);
if (config != null) {
@SuppressWarnings("unchecked")
Class<JobPropertyParser>[] parsers =
(Class[])conf.getClasses(PARSERS_CONFIG_KEY);
for (Class<JobPropertyParser> c : parsers) {
JobPropertyParser parser = ReflectionUtils.newInstance(c, conf);
pList.add(parser);
}
} else {
// add the default MapReduce filter
JobPropertyParser parser = new MapReduceJobPropertiesParser();
pList.add(parser);
}
// filter out the desired config key-value pairs
if (jobProperties != null) {
filteredProperties = new Properties();
// define a configuration object and load it with original job properties
for (Map.Entry<Object, Object> entry : jobProperties.entrySet()) {
//TODO Check for null key/value?
String key = entry.getKey().toString();
String value = entry.getValue().toString();
// find a parser for this key
for (JobPropertyParser p : pList) {
DataType<?> pValue = p.parseJobProperty(key, value);
if (pValue != null) {
filteredProperties.put(key, pValue);
break;
}
}
}
}
return filteredProperties;
}
}
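// --- Hedged usage sketch (not part of the original Hadoop source) ---
// Shows how JobProperties filters a configuration through the parsers above.
// The property values are made up; with PARSERS_CONFIG_KEY unset, the default
// MapReduceJobPropertiesParser is used and every recognized key is mapped to a
// DataType (UserName, JobName, ...), while unrecognized keys are dropped.
class JobPropertiesUsageSketch {
  static void demo() {
    Properties props = new Properties();
    props.setProperty("mapreduce.job.user.name", "bob");    // -> UserName
    props.setProperty("mapreduce.job.name", "word-count");  // -> JobName
    JobProperties jobProps = new JobProperties(props);
    Properties filtered =
        jobProps.getAnonymizedValue(new StatePool(), new Configuration());
  }
}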
| 3,310 | 34.602151 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/UserName.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.datatypes;
/**
* Represents a user's name.
*/
public class UserName extends DefaultAnonymizableDataType {
private final String userName;
public UserName(String userName) {
super();
this.userName = userName;
}
@Override
public String getValue() {
return userName;
}
@Override
protected String getPrefix() {
return "user";
}
}
| 1,215 | 29.4 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/FileName.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.datatypes;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.rumen.anonymization.WordList;
import org.apache.hadoop.tools.rumen.anonymization.WordListAnonymizerUtility;
import org.apache.hadoop.tools.rumen.state.State;
import org.apache.hadoop.tools.rumen.state.StatePool;
import org.apache.hadoop.util.StringUtils;
/**
* Represents a file's location.
*
* Currently, only filenames that can be represented using {@link Path} are
* supported.
*/
public class FileName implements AnonymizableDataType<String> {
private final String fileName;
private String anonymizedFileName;
private static final String PREV_DIR = "..";
private static final String[] KNOWN_SUFFIXES =
new String[] {".xml", ".jar", ".txt", ".tar", ".zip", ".json", ".gzip",
".lzo"};
/**
* A composite state for filename.
*/
public static class FileNameState implements State {
private WordList dirState = new WordList("dir");
private WordList fileNameState = new WordList("file");
@Override
public boolean isUpdated() {
return dirState.isUpdated() || fileNameState.isUpdated();
}
public WordList getDirectoryState() {
return dirState;
}
public WordList getFileNameState() {
return fileNameState;
}
public void setDirectoryState(WordList state) {
this.dirState = state;
}
public void setFileNameState(WordList state) {
this.fileNameState = state;
}
@Override
public String getName() {
return "path";
}
@Override
public void setName(String name) {
// for now, simply assert since this class has a hardcoded name
if (!getName().equals(name)) {
throw new RuntimeException("State name mismatch! Expected '"
+ getName() + "' but found '" + name + "'.");
}
}
}
public FileName(String fileName) {
this.fileName = fileName;
}
@Override
public String getValue() {
return fileName;
}
@Override
public String getAnonymizedValue(StatePool statePool,
Configuration conf) {
if (anonymizedFileName == null) {
anonymize(statePool, conf);
}
return anonymizedFileName;
}
private void anonymize(StatePool statePool, Configuration conf) {
FileNameState fState = (FileNameState) statePool.getState(getClass());
if (fState == null) {
fState = new FileNameState();
statePool.addState(getClass(), fState);
}
String[] files = StringUtils.split(fileName);
String[] anonymizedFileNames = new String[files.length];
int i = 0;
for (String f : files) {
anonymizedFileNames[i++] =
anonymize(statePool, conf, fState, f);
}
anonymizedFileName = StringUtils.arrayToString(anonymizedFileNames);
}
private static String anonymize(StatePool statePool, Configuration conf,
FileNameState fState, String fileName) {
String ret = null;
try {
URI uri = new URI(fileName);
// anonymize the path i.e without the authority & scheme
ret =
anonymizePath(uri.getPath(), fState.getDirectoryState(),
fState.getFileNameState());
// anonymize the authority and scheme
String authority = uri.getAuthority();
String scheme = uri.getScheme();
if (scheme != null) {
String anonymizedAuthority = "";
if (authority != null) {
// anonymize the authority
NodeName hostName = new NodeName(null, uri.getHost());
anonymizedAuthority = hostName.getAnonymizedValue(statePool, conf);
}
ret = scheme + "://" + anonymizedAuthority + ret;
}
} catch (URISyntaxException use) {
throw new RuntimeException (use);
}
return ret;
}
// Anonymize the file-path
private static String anonymizePath(String path, WordList dState,
WordList fState) {
StringBuilder buffer = new StringBuilder();
StringTokenizer tokenizer = new StringTokenizer(path, Path.SEPARATOR, true);
while (tokenizer.hasMoreTokens()) {
String token = tokenizer.nextToken();
if (Path.SEPARATOR.equals(token)) {
buffer.append(token);
} else if (Path.CUR_DIR.equals(token)) {
buffer.append(token);
} else if (PREV_DIR.equals(token)) {
buffer.append(token);
} else if (tokenizer.hasMoreTokens()){
// this component is a directory
buffer.append(anonymize(token, dState));
} else {
// this component is a file
buffer.append(anonymize(token, fState));
}
}
return buffer.toString();
}
//TODO There is no caching for saving memory.
private static String anonymize(String data, WordList wordList) {
if (data == null) {
return null;
}
if (WordListAnonymizerUtility.needsAnonymization(data)) {
String suffix = "";
String coreData = data;
// check and extract suffix
if (WordListAnonymizerUtility.hasSuffix(data, KNOWN_SUFFIXES)) {
// check if the data ends with a known suffix
String[] split =
WordListAnonymizerUtility.extractSuffix(data, KNOWN_SUFFIXES);
suffix = split[1];
coreData = split[0];
}
// check if the data is known content
//TODO [Chunking] Do this for sub-strings of data
String anonymizedData = coreData;
if (!WordListAnonymizerUtility.isKnownData(coreData)) {
if (!wordList.contains(coreData)) {
wordList.add(coreData);
}
anonymizedData = wordList.getName() + wordList.indexOf(coreData);
}
return anonymizedData + suffix;
} else {
return data;
}
}
}
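// --- Hedged usage sketch (not part of the original Hadoop source) ---
// Illustrates the expected shape of the anonymized output given the logic
// above; the input URI is made up. Directory components become "dir<N>", the
// final component becomes "file<N>" with known suffixes such as ".txt"
// preserved, and the authority is anonymized through NodeName.
class FileNameUsageSketch {
  static void demo() {
    StatePool pool = new StatePool();
    Configuration conf = new Configuration();
    FileName file = new FileName("hdfs://nn.example.com/user/bob/input.txt");
    // Expected to yield something like "hdfs://host0/dir0/dir1/file0.txt".
    String anonymized = file.getAnonymizedValue(pool, conf);
  }
}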
| 6,822 | 31.032864 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/DataType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.datatypes;
/**
* Represents a Rumen data-type.
*/
public interface DataType<T> {
T getValue();
}
| 947 | 35.461538 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/AnonymizableDataType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.datatypes;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.rumen.state.StatePool;
/**
* An interface for data-types that can be anonymized.
*/
public interface AnonymizableDataType<T> extends DataType<T> {
public T getAnonymizedValue(StatePool statePool, Configuration conf);
}
| 1,157 | 38.931034 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/ClassName.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.datatypes;
import org.apache.hadoop.conf.Configuration;
/**
* Represents a class name.
*/
public class ClassName extends DefaultAnonymizableDataType {
public static final String CLASSNAME_PRESERVE_CONFIG = "rumen.data-types.classname.preserve";
private final String className;
public ClassName(String className) {
super();
this.className = className;
}
@Override
public String getValue() {
return className;
}
@Override
protected String getPrefix() {
return "class";
}
@Override
protected boolean needsAnonymization(Configuration conf) {
String[] preserves = conf.getStrings(CLASSNAME_PRESERVE_CONFIG);
if (preserves != null) {
// do a simple starts with check
for (String p : preserves) {
if (className.startsWith(p)) {
return false;
}
}
}
return true;
}
}
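// --- Hedged usage sketch (not part of the original Hadoop source) ---
// Demonstrates the preserve list checked in needsAnonymization() above; the
// prefix and class name below are made-up examples. Matching class names are
// emitted unchanged, everything else becomes "class<N>".
class ClassNameUsageSketch {
  static void demo() {
    Configuration conf = new Configuration();
    conf.setStrings(ClassName.CLASSNAME_PRESERVE_CONFIG, "org.apache.hadoop.");
    ClassName preserved = new ClassName("org.apache.hadoop.examples.WordCount");
    // Returned as-is because it starts with a preserved prefix.
    String value = preserved.getAnonymizedValue(
        new org.apache.hadoop.tools.rumen.state.StatePool(), conf);
  }
}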
| 1,720 | 29.192982 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/JobName.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.datatypes;
/**
* Represents a job's name.
*/
public class JobName extends DefaultAnonymizableDataType {
private final String jobName;
public JobName(String jobName) {
super();
this.jobName = jobName;
}
@Override
public String getValue() {
return jobName;
}
@Override
protected String getPrefix() {
return "job";
}
}
| 1,207 | 28.463415 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/QueueName.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.datatypes;
/**
* Represents a queue name.
*/
public class QueueName extends DefaultAnonymizableDataType {
private final String queueName;
public QueueName(String queueName) {
super();
this.queueName = queueName;
}
@Override
public String getValue() {
return queueName;
}
@Override
protected String getPrefix() {
return "queue";
};
}
| 1,224 | 28.878049 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/DefaultJobPropertiesParser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.datatypes.util;
import org.apache.hadoop.tools.rumen.datatypes.DataType;
import org.apache.hadoop.tools.rumen.datatypes.DefaultDataType;
/**
* A simple job property parser that acts like a pass-through filter.
*/
public class DefaultJobPropertiesParser implements JobPropertyParser {
@Override
public DataType<?> parseJobProperty(String key, String value) {
return new DefaultDataType(value);
}
}
| 1,255 | 39.516129 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.datatypes.util;
import java.lang.reflect.Field;
import java.text.DecimalFormat;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.tools.rumen.datatypes.*;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
/**
* A default parser for MapReduce job configuration properties.
* MapReduce job configuration properties are represented as key-value pairs.
* Each key represents a configuration knob which controls or affects the
* behavior of a MapReduce job or a job's task. Some of the keys are deprecated;
* as a result of deprecation, some keys change or are preferred over other keys
* across versions. {@link MapReduceJobPropertiesParser} is a utility class that
* parses MapReduce job configuration properties and converts the value into a
* well defined {@link DataType}. Users can use the
* {@link MapReduceJobPropertiesParser#parseJobProperty(String, String)} API to
* process job configuration parameters. This API will parse a job property
* represented as a key-value pair and return the value wrapped inside a
* {@link DataType}. Callers can then use the returned {@link DataType} for
* further processing.
*
* {@link MapReduceJobPropertiesParser} relies on the key name to decide which
* {@link DataType} to wrap the value with. Values for keys representing
* job-name, queue-name, user-name etc. are wrapped inside {@link JobName},
* {@link QueueName}, {@link UserName} etc. respectively. Keys ending with *dir*
* are considered directories and hence get wrapped inside {@link FileName}.
* Similarly, keys ending with *codec*, *log*, *class* etc. are handled
* accordingly. Values representing basic Java data-types like integer, float,
* double, boolean etc. are wrapped inside {@link DefaultDataType}. If the key
* represents JVM-level settings, then only standard settings are extracted and
* wrapped inside {@link DefaultDataType}. Currently only '-Xmx' and '-Xms'
* settings are considered while the rest are ignored.
*
* Note that the {@link MapReduceJobPropertiesParser#parseJobProperty(String,
* String)} API maps the keys to a configuration parameter listed in
* {@link MRJobConfig}. This not only filters out non-framework-specific keys,
* ignoring user-specific and hard-to-parse keys, but also provides a
* consistent view for all possible inputs. So if users invoke the
* {@link MapReduceJobPropertiesParser#parseJobProperty(String, String)} API
* with either <"mapreduce.job.user.name", "bob"> or
* <"user.name", "bob">, then the result would be a {@link UserName}
* {@link DataType} wrapping the user-name "bob".
*/
@SuppressWarnings("deprecation")
public class MapReduceJobPropertiesParser implements JobPropertyParser {
private Field[] mrFields = MRJobConfig.class.getFields();
private DecimalFormat format = new DecimalFormat();
private JobConf configuration = new JobConf(false);
private static final Pattern MAX_HEAP_PATTERN =
Pattern.compile("-Xmx[0-9]+[kKmMgGtT]?+");
private static final Pattern MIN_HEAP_PATTERN =
Pattern.compile("-Xms[0-9]+[kKmMgGtT]?+");
// turn off the warning w.r.t deprecated mapreduce keys
static {
Logger.getLogger(Configuration.class).setLevel(Level.OFF);
}
// Accepts a key if there is a corresponding key in the current mapreduce
// configuration
private boolean accept(String key) {
return getLatestKeyName(key) != null;
}
// Finds a corresponding key for the specified key in the current mapreduce
// setup.
// Note that this API uses a cached copy of the Configuration object. This is
// purely for performance reasons.
private String getLatestKeyName(String key) {
// set the specified key
configuration.set(key, key);
try {
// check if keys in MRConfig maps to the specified key.
for (Field f : mrFields) {
String mrKey = f.get(f.getName()).toString();
if (configuration.get(mrKey) != null) {
return mrKey;
}
}
// unset the key
return null;
} catch (IllegalAccessException iae) {
throw new RuntimeException(iae);
} finally {
// clean up!
configuration.clear();
}
}
@Override
public DataType<?> parseJobProperty(String key, String value) {
if (accept(key)) {
return fromString(key, value);
}
return null;
}
/**
* Extracts the -Xmx heap option from the specified string.
*/
public static void extractMaxHeapOpts(String javaOptions,
List<String> heapOpts,
List<String> others) {
for (String opt : javaOptions.split(" ")) {
Matcher matcher = MAX_HEAP_PATTERN.matcher(opt);
if (matcher.find()) {
heapOpts.add(opt);
} else {
others.add(opt);
}
}
}
/**
* Extracts the -Xms heap option from the specified string.
*/
public static void extractMinHeapOpts(String javaOptions,
List<String> heapOpts, List<String> others) {
for (String opt : javaOptions.split(" ")) {
Matcher matcher = MIN_HEAP_PATTERN.matcher(opt);
if (matcher.find()) {
heapOpts.add(opt);
} else {
others.add(opt);
}
}
}
// Maps the value of the specified key.
private DataType<?> fromString(String key, String value) {
if (value != null) {
// check known configs
// job-name
String latestKey = getLatestKeyName(key);
if (MRJobConfig.JOB_NAME.equals(latestKey)) {
return new JobName(value);
}
// user-name
if (MRJobConfig.USER_NAME.equals(latestKey)) {
return new UserName(value);
}
// queue-name
if (MRJobConfig.QUEUE_NAME.equals(latestKey)) {
return new QueueName(value);
}
if (MRJobConfig.MAP_JAVA_OPTS.equals(latestKey)
|| MRJobConfig.REDUCE_JAVA_OPTS.equals(latestKey)) {
List<String> heapOptions = new ArrayList<String>();
extractMaxHeapOpts(value, heapOptions, new ArrayList<String>());
extractMinHeapOpts(value, heapOptions, new ArrayList<String>());
return new DefaultDataType(StringUtils.join(heapOptions, ' '));
}
//TODO compression?
//TODO Other job configs like FileOutputFormat/FileInputFormat etc
// check if the config parameter represents a number
try {
format.parse(value);
return new DefaultDataType(value);
} catch (ParseException pe) {}
// check if the config parameters represents a boolean
// avoiding exceptions
if ("true".equals(value) || "false".equals(value)) {
Boolean.parseBoolean(value);
return new DefaultDataType(value);
}
// check if the config parameter represents a class
if (latestKey.endsWith(".class") || latestKey.endsWith(".codec")) {
return new ClassName(value);
}
// handle distributed cache sizes and timestamps
if (latestKey.endsWith("sizes")
|| latestKey.endsWith(".timestamps")) {
return new DefaultDataType(value);
}
// check if the config parameter represents a file-system path
//TODO: Make this concrete .location .path .dir .jar?
if (latestKey.endsWith(".dir") || latestKey.endsWith(".location")
|| latestKey.endsWith(".jar") || latestKey.endsWith(".path")
|| latestKey.endsWith(".logfile") || latestKey.endsWith(".file")
|| latestKey.endsWith(".files") || latestKey.endsWith(".archives")) {
try {
return new FileName(value);
} catch (Exception ioe) {}
}
}
return null;
}
}
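// --- Hedged usage sketch (not part of the original Hadoop source) ---
// Shows the key normalization described in the class javadoc; values are made
// up. Both the current and the deprecated user-name keys resolve (through
// getLatestKeyName) to MRJobConfig.USER_NAME and yield a UserName, while
// unrecognized user-defined keys return null.
class MapReduceJobPropertiesParserUsageSketch {
  static void demo() {
    JobPropertyParser parser = new MapReduceJobPropertiesParser();
    DataType<?> current = parser.parseJobProperty("mapreduce.job.user.name", "bob");
    DataType<?> legacy = parser.parseJobProperty("user.name", "bob");
    DataType<?> unknown = parser.parseJobProperty("my.custom.key", "42");  // null
  }
}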
| 8,918 | 37.947598 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/JobPropertyParser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.datatypes.util;
import org.apache.hadoop.tools.rumen.datatypes.DataType;
import org.apache.hadoop.tools.rumen.datatypes.JobProperties;
/**
* A {@link JobProperties} parsing utility.
*/
public interface JobPropertyParser {
/**
* Parse the specified job configuration key-value pair.
*
* @return Returns a {@link DataType} if this parser can parse this value.
* Returns 'null' otherwise.
*/
public DataType<?> parseJobProperty(String key, String value);
}
| 1,331 | 37.057143 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/DefaultRumenSerializer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.serializers;
import java.io.IOException;
import org.apache.hadoop.tools.rumen.datatypes.DataType;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.JsonProcessingException;
import org.codehaus.jackson.map.JsonSerializer;
import org.codehaus.jackson.map.SerializerProvider;
/**
* Default Rumen JSON serializer.
*/
@SuppressWarnings("unchecked")
public class DefaultRumenSerializer extends JsonSerializer<DataType> {
public void serialize(DataType object, JsonGenerator jGen, SerializerProvider sProvider)
throws IOException, JsonProcessingException {
Object data = object.getValue();
if (data instanceof String) {
jGen.writeString(data.toString());
} else {
jGen.writeObject(data);
}
};
}
| 1,593 | 36.952381 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/DefaultAnonymizingRumenSerializer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.serializers;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.rumen.datatypes.AnonymizableDataType;
import org.apache.hadoop.tools.rumen.state.StatePool;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.JsonProcessingException;
import org.codehaus.jackson.map.JsonSerializer;
import org.codehaus.jackson.map.SerializerProvider;
/**
* Default anonymizing Rumen JSON serializer.
*/
@SuppressWarnings("unchecked")
public class DefaultAnonymizingRumenSerializer
extends JsonSerializer<AnonymizableDataType> {
private StatePool statePool;
private Configuration conf;
public DefaultAnonymizingRumenSerializer(StatePool statePool,
Configuration conf) {
this.statePool = statePool;
this.conf = conf;
}
public void serialize(AnonymizableDataType object, JsonGenerator jGen,
SerializerProvider sProvider)
throws IOException, JsonProcessingException {
Object val = object.getAnonymizedValue(statePool, conf);
// output the data if its a string
if (val instanceof String) {
jGen.writeString(val.toString());
} else {
// let the mapper (JSON generator) handle this anonymized object.
jGen.writeObject(val);
}
};
}
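// --- Hedged sketch (not part of the original Hadoop source) ---
// Shows how a serializer like this can be registered, mirroring the
// SimpleModule pattern used in StatePool.write(); the module name and version
// here are illustrative assumptions, not the Anonymizer's actual wiring.
class AnonymizingSerializerRegistrationSketch {
  @SuppressWarnings("unchecked")
  static org.codehaus.jackson.map.ObjectMapper demo(StatePool pool,
                                                    Configuration conf) {
    org.codehaus.jackson.map.ObjectMapper mapper =
        new org.codehaus.jackson.map.ObjectMapper();
    org.codehaus.jackson.map.module.SimpleModule module =
        new org.codehaus.jackson.map.module.SimpleModule("Anonymizing Serializer",
            new org.codehaus.jackson.Version(0, 1, 1, "FINAL"));
    module.addSerializer(AnonymizableDataType.class,
        new DefaultAnonymizingRumenSerializer(pool, conf));
    mapper.registerModule(module);
    return mapper;
  }
}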
| 2,154 | 36.807018 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/BlockingSerializer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.serializers;
import java.io.IOException;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.JsonProcessingException;
import org.codehaus.jackson.map.JsonSerializer;
import org.codehaus.jackson.map.SerializerProvider;
/**
* A JSON serializer that blocks String values by serializing them as null.
*/
public class BlockingSerializer extends JsonSerializer<String> {
public void serialize(String object, JsonGenerator jGen, SerializerProvider sProvider)
throws IOException, JsonProcessingException {
jGen.writeNull();
};
}
| 1,362 | 35.837838 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/ObjectStringSerializer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.serializers;
import java.io.IOException;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.JsonProcessingException;
import org.codehaus.jackson.map.JsonSerializer;
import org.codehaus.jackson.map.SerializerProvider;
/**
* Rumen JSON serializer for serializing objects using the toString() API.
*/
public class ObjectStringSerializer<T> extends JsonSerializer<T> {
public void serialize(T object, JsonGenerator jGen, SerializerProvider sProvider)
throws IOException, JsonProcessingException {
jGen.writeString(object.toString());
};
}
| 1,409 | 39.285714 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ant;
import java.io.ByteArrayOutputStream;
import java.io.OutputStream;
import java.io.PrintStream;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.LinkedList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.tools.ant.AntClassLoader;
import org.apache.tools.ant.BuildException;
import org.apache.tools.ant.Task;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.HdfsConfiguration;
/**
* {@link org.apache.hadoop.fs.FsShell FsShell} wrapper for ant Task.
*/
@InterfaceAudience.Private
public class DfsTask extends Task {
/**
* Default sink for {@link java.lang.System#out}
* and {@link java.lang.System#err}.
*/
private static final OutputStream nullOut = new OutputStream() {
public void write(int b) { /* ignore */ }
public String toString() { return ""; }
};
private static final FsShell shell = new FsShell();
protected AntClassLoader confloader;
protected OutputStream out = nullOut;
protected OutputStream err = nullOut;
// set by ant
protected String cmd;
protected final LinkedList<String> argv = new LinkedList<String>();
protected String outprop;
protected String errprop;
protected boolean failonerror = true;
// saved ant context
private PrintStream antOut;
private PrintStream antErr;
/**
* Sets the command to run in {@link org.apache.hadoop.fs.FsShell FsShell}.
* @param cmd A valid command to FsShell, sans "-".
*/
public void setCmd(String cmd) {
this.cmd = "-" + cmd.trim();
}
/**
* Sets the argument list from a String of comma-separated values.
* @param args A String of comma-separated arguments to FsShell.
*/
public void setArgs(String args) {
for (String s : args.trim().split("\\s*,\\s*"))
argv.add(s);
}
/**
* Sets the property into which System.out will be written.
* @param outprop The name of the property into which System.out is written.
* If the property is defined before this task is executed, it will not be updated.
*/
public void setOut(String outprop) {
this.outprop = outprop;
out = new ByteArrayOutputStream();
if (outprop.equals(errprop))
err = out;
}
/**
* Sets the property into which System.err will be written. If this property
* has the same name as the property for System.out, the two will be interlaced.
* @param errprop The name of the property into which System.err is written.
* If the property is defined before this task is executed, it will not be updated.
*/
public void setErr(String errprop) {
this.errprop = errprop;
    err = errprop.equals(outprop) ? out : new ByteArrayOutputStream();
}
/**
* Sets the path for the parent-last ClassLoader, intended to be used for
* {@link org.apache.hadoop.conf.Configuration Configuration}.
* @param confpath The path to search for resources, classes, etc. before
* parent ClassLoaders.
*/
public void setConf(String confpath) {
confloader = AccessController.doPrivileged(
new PrivilegedAction<AntClassLoader>() {
@Override
public AntClassLoader run() {
return new AntClassLoader(getClass().getClassLoader(), false);
}
});
confloader.setProject(getProject());
if (null != confpath)
confloader.addPathElement(confpath);
}
/**
* Sets a property controlling whether or not a
* {@link org.apache.tools.ant.BuildException BuildException} will be thrown
* if the command returns a value less than zero or throws an exception.
* @param failonerror If true, throw a BuildException on error.
*/
public void setFailonerror(boolean failonerror) {
this.failonerror = failonerror;
}
/**
* Save the current values of System.out, System.err and configure output
* streams for FsShell.
*/
protected void pushContext() {
antOut = System.out;
antErr = System.err;
System.setOut(new PrintStream(out));
System.setErr(out == err ? System.out : new PrintStream(err));
}
/**
* Create the appropriate output properties with their respective output,
* restore System.out, System.err and release any resources from created
* ClassLoaders to aid garbage collection.
*/
protected void popContext() {
// write output to property, if applicable
if (outprop != null && !System.out.checkError())
getProject().setNewProperty(outprop, out.toString());
if (out != err && errprop != null && !System.err.checkError())
getProject().setNewProperty(errprop, err.toString());
System.setErr(antErr);
System.setOut(antOut);
confloader.cleanup();
confloader.setParent(null);
}
// in case DfsTask is overridden
protected int postCmd(int exit_code) {
if ("-test".equals(cmd) && exit_code != 0)
outprop = null;
return exit_code;
}
/**
* Invoke {@link org.apache.hadoop.fs.FsShell#main} after a
* few cursory checks of the configuration.
*/
public void execute() throws BuildException {
if (null == cmd)
throw new BuildException("Missing command (cmd) argument");
argv.add(0, cmd);
if (null == confloader) {
setConf(getProject().getProperty("hadoop.conf.dir"));
}
int exit_code = 0;
try {
pushContext();
Configuration conf = new HdfsConfiguration();
conf.setClassLoader(confloader);
exit_code = ToolRunner.run(conf, shell,
argv.toArray(new String[argv.size()]));
exit_code = postCmd(exit_code);
if (0 > exit_code) {
StringBuilder msg = new StringBuilder();
for (String s : argv)
msg.append(s + " ");
msg.append("failed: " + exit_code);
throw new Exception(msg.toString());
}
} catch (Exception e) {
if (failonerror)
throw new BuildException(e);
} finally {
popContext();
}
}
}
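// Illustrative Ant usage, not part of the original source. The taskdef name,
// the classpath reference and the paths are assumptions; the attribute names map
// onto the setters above (cmd, args, out, err, conf, failonerror):
//
//   <taskdef name="hdfs" classname="org.apache.hadoop.ant.DfsTask"
//            classpathref="hadoop.classpath"/>
//   <hdfs cmd="ls" args="/user/example" out="ls.out" failonerror="false"/>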
| 6,855 | 31.492891 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/condition/DfsIsDir.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ant.condition;
public class DfsIsDir extends DfsBaseConditional {
protected final char flag = 'd';
protected char getFlag() { return flag; }
}
| 982 | 38.32 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/condition/DfsExists.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ant.condition;
public class DfsExists extends DfsBaseConditional {
protected final char flag = 'e';
protected char getFlag() { return flag; }
}
| 983 | 38.36 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/condition/DfsBaseConditional.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ant.condition;
import org.apache.tools.ant.taskdefs.condition.Condition;
/**
* This wrapper around {@link org.apache.hadoop.ant.DfsTask} implements the
 * Ant (versions later than 1.5)
 * {@link org.apache.tools.ant.taskdefs.condition.Condition Condition}
 * interface for HDFS tests, so one can test conditions like this:
* {@code
* <condition property="precond">
* <and>
* <hadoop:exists file="fileA" />
* <hadoop:exists file="fileB" />
* <hadoop:sizezero file="fileB" />
* </and>
* </condition>
* }
* This will define the property precond if fileA exists and fileB has zero
* length.
*/
public abstract class DfsBaseConditional extends org.apache.hadoop.ant.DfsTask
implements Condition {
protected boolean result;
String file;
private void initArgs() {
setCmd("test");
setArgs("-" + getFlag() + "," + file);
}
public void setFile(String file) {
this.file = file;
}
protected abstract char getFlag();
protected int postCmd(int exit_code) {
exit_code = super.postCmd(exit_code);
result = exit_code == 0;
return exit_code;
}
public boolean eval() {
initArgs();
execute();
return result;
}
}
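// Illustrative note, not part of the original source: the <hadoop:exists> style
// elements in the class comment assume the concrete conditions have been declared
// through an Ant typedef. The antlib URI and the classpath reference below are
// assumptions; the class names are the subclasses in this package:
//
//   <typedef name="exists" classname="org.apache.hadoop.ant.condition.DfsExists"
//            uri="urn:example:hadoop" classpathref="hadoop.classpath"/>
//   <typedef name="isdir" classname="org.apache.hadoop.ant.condition.DfsIsDir"
//            uri="urn:example:hadoop" classpathref="hadoop.classpath"/>
//   <typedef name="sizezero" classname="org.apache.hadoop.ant.condition.DfsZeroLen"
//            uri="urn:example:hadoop" classpathref="hadoop.classpath"/>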
| 2,042 | 28.608696 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/condition/DfsZeroLen.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ant.condition;
public class DfsZeroLen extends DfsBaseConditional {
protected final char flag = 'z';
protected char getFlag() { return flag; }
}
| 984 | 38.4 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AConfiguration.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3a;
import com.amazonaws.services.s3.AmazonS3Client;
import org.apache.commons.lang.StringUtils;
import com.amazonaws.AmazonClientException;
import org.apache.hadoop.conf.Configuration;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
public class TestS3AConfiguration {
private Configuration conf;
private S3AFileSystem fs;
private static final Logger LOG =
LoggerFactory.getLogger(TestS3AConfiguration.class);
private static final String TEST_ENDPOINT = "test.fs.s3a.endpoint";
@Rule
public Timeout testTimeout = new Timeout(30 * 60 * 1000);
/**
* Test if custom endpoint is picked up.
* <p/>
* The test expects TEST_ENDPOINT to be defined in the Configuration
* describing the endpoint of the bucket to which TEST_FS_S3A_NAME points
* (f.i. "s3-eu-west-1.amazonaws.com" if the bucket is located in Ireland).
* Evidently, the bucket has to be hosted in the region denoted by the
* endpoint for the test to succeed.
* <p/>
* More info and the list of endpoint identifiers:
* http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
*
* @throws Exception
*/
@Test
public void TestEndpoint() throws Exception {
conf = new Configuration();
String endpoint = conf.getTrimmed(TEST_ENDPOINT, "");
if (endpoint.isEmpty()) {
      LOG.warn("Custom endpoint test skipped as " + TEST_ENDPOINT
          + " config setting was not detected");
} else {
conf.set(Constants.ENDPOINT, endpoint);
fs = S3ATestUtils.createTestFileSystem(conf);
AmazonS3Client s3 = fs.getAmazonS3Client();
String endPointRegion = "";
// Differentiate handling of "s3-" and "s3." based endpoint identifiers
String[] endpointParts = StringUtils.split(endpoint, '.');
if (endpointParts.length == 3) {
endPointRegion = endpointParts[0].substring(3);
} else if (endpointParts.length == 4) {
endPointRegion = endpointParts[1];
} else {
fail("Unexpected endpoint");
}
assertEquals("Endpoint config setting and bucket location differ: ",
endPointRegion, s3.getBucketLocation(fs.getUri().getHost()));
}
}
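  // Illustrative test configuration for the endpoint check above. The bucket name
  // and region value are assumptions; the property names come from
  // TEST_FS_S3A_NAME ("test.fs.s3a.name") and TEST_ENDPOINT ("test.fs.s3a.endpoint"):
  //
  //   <property>
  //     <name>test.fs.s3a.name</name>
  //     <value>s3a://example-bucket/</value>
  //   </property>
  //   <property>
  //     <name>test.fs.s3a.endpoint</name>
  //     <value>s3-eu-west-1.amazonaws.com</value>
  //   </property>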
@Test
public void TestProxyConnection() throws Exception {
conf = new Configuration();
conf.setInt(Constants.MAX_ERROR_RETRIES, 2);
conf.set(Constants.PROXY_HOST, "127.0.0.1");
conf.setInt(Constants.PROXY_PORT, 1);
String proxy =
conf.get(Constants.PROXY_HOST) + ":" + conf.get(Constants.PROXY_PORT);
try {
fs = S3ATestUtils.createTestFileSystem(conf);
fail("Expected a connection error for proxy server at " + proxy);
} catch (AmazonClientException e) {
if (!e.getMessage().contains(proxy + " refused")) {
throw e;
}
}
}
@Test
public void TestProxyPortWithoutHost() throws Exception {
conf = new Configuration();
conf.setInt(Constants.MAX_ERROR_RETRIES, 2);
conf.setInt(Constants.PROXY_PORT, 1);
try {
fs = S3ATestUtils.createTestFileSystem(conf);
fail("Expected a proxy configuration error");
} catch (IllegalArgumentException e) {
String msg = e.toString();
if (!msg.contains(Constants.PROXY_HOST) &&
!msg.contains(Constants.PROXY_PORT)) {
throw e;
}
}
}
@Test
public void TestAutomaticProxyPortSelection() throws Exception {
conf = new Configuration();
conf.setInt(Constants.MAX_ERROR_RETRIES, 2);
conf.set(Constants.PROXY_HOST, "127.0.0.1");
conf.set(Constants.SECURE_CONNECTIONS, "true");
try {
fs = S3ATestUtils.createTestFileSystem(conf);
fail("Expected a connection error for proxy server");
} catch (AmazonClientException e) {
if (!e.getMessage().contains("443")) {
throw e;
}
}
conf.set(Constants.SECURE_CONNECTIONS, "false");
try {
fs = S3ATestUtils.createTestFileSystem(conf);
fail("Expected a connection error for proxy server");
} catch (AmazonClientException e) {
if (!e.getMessage().contains("80")) {
throw e;
}
}
}
@Test
public void TestUsernameInconsistentWithPassword() throws Exception {
conf = new Configuration();
conf.setInt(Constants.MAX_ERROR_RETRIES, 2);
conf.set(Constants.PROXY_HOST, "127.0.0.1");
conf.setInt(Constants.PROXY_PORT, 1);
conf.set(Constants.PROXY_USERNAME, "user");
try {
fs = S3ATestUtils.createTestFileSystem(conf);
fail("Expected a connection error for proxy server");
} catch (IllegalArgumentException e) {
String msg = e.toString();
if (!msg.contains(Constants.PROXY_USERNAME) &&
!msg.contains(Constants.PROXY_PASSWORD)) {
throw e;
}
}
conf = new Configuration();
conf.setInt(Constants.MAX_ERROR_RETRIES, 2);
conf.set(Constants.PROXY_HOST, "127.0.0.1");
conf.setInt(Constants.PROXY_PORT, 1);
conf.set(Constants.PROXY_PASSWORD, "password");
try {
fs = S3ATestUtils.createTestFileSystem(conf);
fail("Expected a connection error for proxy server");
} catch (IllegalArgumentException e) {
String msg = e.toString();
if (!msg.contains(Constants.PROXY_USERNAME) &&
!msg.contains(Constants.PROXY_PASSWORD)) {
throw e;
}
}
}
}
| 6,281 | 33.707182 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3a;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.junit.internal.AssumptionViolatedException;
import java.io.IOException;
import java.net.URI;
public class S3ATestUtils {
public static S3AFileSystem createTestFileSystem(Configuration conf) throws
IOException {
String fsname = conf.getTrimmed(TestS3AFileSystemContract.TEST_FS_S3A_NAME, "");
boolean liveTest = !StringUtils.isEmpty(fsname);
URI testURI = null;
if (liveTest) {
testURI = URI.create(fsname);
liveTest = testURI.getScheme().equals(Constants.FS_S3A);
}
if (!liveTest) {
// This doesn't work with our JUnit 3 style test cases, so instead we'll
// make this whole class not run by default
throw new AssumptionViolatedException(
"No test filesystem in " + TestS3AFileSystemContract.TEST_FS_S3A_NAME);
}
S3AFileSystem fs1 = new S3AFileSystem();
//enable purging in tests
conf.setBoolean(Constants.PURGE_EXISTING_MULTIPART, true);
conf.setInt(Constants.PURGE_EXISTING_MULTIPART_AGE, 0);
fs1.initialize(testURI, conf);
return fs1;
}
}
| 1,991 | 35.218182 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AFileSystemContract.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3a;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.apache.hadoop.fs.Path;
/**
 * Tests a live S3 system. If the keys and bucket are not specified, all tests
 * are marked as passed.
 *
 * This uses BlockJUnit4ClassRunner because FileSystemContractBaseTest extends
 * TestCase, which uses the old JUnit 3 runner; that runner does not honour
 * assumptions properly, making it impossible to skip the tests when there is
 * no valid bucket.
 **/
public class TestS3AFileSystemContract extends FileSystemContractBaseTest {
protected static final Logger LOG =
LoggerFactory.getLogger(TestS3AFileSystemContract.class);
public static final String TEST_FS_S3A_NAME = "test.fs.s3a.name";
@Override
public void setUp() throws Exception {
Configuration conf = new Configuration();
fs = S3ATestUtils.createTestFileSystem(conf);
super.setUp();
}
@Override
protected void tearDown() throws Exception {
if (fs != null) {
fs.delete(path("test"), true);
}
super.tearDown();
}
@Override
public void testMkdirsWithUmask() throws Exception {
// not supported
}
@Override
public void testRenameFileAsExistingFile() throws Exception {
if (!renameSupported()) return;
Path src = path("/test/hadoop/file");
createFile(src);
Path dst = path("/test/new/newfile");
createFile(dst);
// s3 doesn't support rename option
// rename-overwrites-dest is always allowed.
rename(src, dst, true, false, true);
}
@Override
public void testRenameDirectoryAsExistingDirectory() throws Exception {
if (!renameSupported()) {
return;
}
Path src = path("/test/hadoop/dir");
fs.mkdirs(src);
createFile(path("/test/hadoop/dir/file1"));
createFile(path("/test/hadoop/dir/subdir/file2"));
Path dst = path("/test/new/newdir");
fs.mkdirs(dst);
rename(src, dst, true, false, true);
assertFalse("Nested file1 exists",
fs.exists(path("/test/hadoop/dir/file1")));
assertFalse("Nested file2 exists",
fs.exists(path("/test/hadoop/dir/subdir/file2")));
assertTrue("Renamed nested file1 exists",
fs.exists(path("/test/new/newdir/file1")));
    assertTrue("Renamed nested file2 exists",
fs.exists(path("/test/new/newdir/subdir/file2")));
}
// @Override
public void testMoveDirUnderParent() throws Throwable {
    // not supported, because rename
    // fails if dst is a directory that is not empty.
}
}
| 3,422 | 31.292453 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlocksize.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3a;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.AbstractFSContractTestBase;
import org.apache.hadoop.fs.contract.s3a.S3AContract;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
import static org.apache.hadoop.fs.contract.ContractTestUtils.fileStatsToString;
public class TestS3ABlocksize extends AbstractFSContractTestBase {
private static final Logger LOG =
LoggerFactory.getLogger(TestS3ABlocksize.class);
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf);
}
@Rule
public Timeout testTimeout = new Timeout(30 * 60 * 1000);
@Test
@SuppressWarnings("deprecation")
public void testBlockSize() throws Exception {
FileSystem fs = getFileSystem();
long defaultBlockSize = fs.getDefaultBlockSize();
assertEquals("incorrect blocksize",
S3AFileSystem.DEFAULT_BLOCKSIZE, defaultBlockSize);
long newBlockSize = defaultBlockSize * 2;
fs.getConf().setLong(Constants.FS_S3A_BLOCK_SIZE, newBlockSize);
Path dir = path("testBlockSize");
Path file = new Path(dir, "file");
createFile(fs, file, true, dataset(1024, 'a', 'z' - 'a'));
FileStatus fileStatus = fs.getFileStatus(file);
assertEquals("Double default block size in stat(): " + fileStatus,
newBlockSize,
fileStatus.getBlockSize());
// check the listing & assert that the block size is picked up by
// this route too.
boolean found = false;
FileStatus[] listing = fs.listStatus(dir);
for (FileStatus stat : listing) {
LOG.info("entry: {}", stat);
if (file.equals(stat.getPath())) {
found = true;
assertEquals("Double default block size in ls(): " + stat,
newBlockSize,
stat.getBlockSize());
}
}
assertTrue("Did not find " + fileStatsToString(listing, ", "), found);
}
@Test
public void testRootFileStatusHasBlocksize() throws Throwable {
FileSystem fs = getFileSystem();
FileStatus status = fs.getFileStatus(new Path("/"));
assertTrue("Invalid root blocksize",
status.getBlockSize() >= 0);
}
}
| 3,388 | 35.053191 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AFastOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3a;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
/**
* Tests regular and multi-part upload functionality for S3AFastOutputStream.
 * File sizes are kept small to reduce test duration on slow connections.
*/
public class TestS3AFastOutputStream {
private FileSystem fs;
@Rule
public Timeout testTimeout = new Timeout(30 * 60 * 1000);
@Before
public void setUp() throws Exception {
Configuration conf = new Configuration();
conf.setLong(Constants.MIN_MULTIPART_THRESHOLD, 5 * 1024 * 1024);
conf.setInt(Constants.MULTIPART_SIZE, 5 * 1024 * 1024);
conf.setBoolean(Constants.FAST_UPLOAD, true);
fs = S3ATestUtils.createTestFileSystem(conf);
}
@After
public void tearDown() throws Exception {
if (fs != null) {
fs.delete(getTestPath(), true);
}
}
protected Path getTestPath() {
return new Path("/tests3a");
}
@Test
public void testRegularUpload() throws IOException {
ContractTestUtils.createAndVerifyFile(fs, getTestPath(), 1024 * 1024);
}
@Test
public void testMultiPartUpload() throws IOException {
ContractTestUtils.createAndVerifyFile(fs, getTestPath(), 6 * 1024 *
1024);
}
}
| 2,297 | 29.64 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/TestS3ADeleteManyFiles.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3a.scale;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import static org.junit.Assert.assertEquals;
public class TestS3ADeleteManyFiles extends S3AScaleTestBase {
private static final Logger LOG =
LoggerFactory.getLogger(TestS3ADeleteManyFiles.class);
@Rule
public Timeout testTimeout = new Timeout(30 * 60 * 1000);
@Test
public void testBulkRenameAndDelete() throws Throwable {
final Path scaleTestDir = getTestPath();
final Path srcDir = new Path(scaleTestDir, "src");
final Path finalDir = new Path(scaleTestDir, "final");
final long count = getOperationCount();
ContractTestUtils.rm(fs, scaleTestDir, true, false);
fs.mkdirs(srcDir);
fs.mkdirs(finalDir);
int testBufferSize = fs.getConf()
.getInt(ContractTestUtils.IO_CHUNK_BUFFER_SIZE,
ContractTestUtils.DEFAULT_IO_CHUNK_BUFFER_SIZE);
// use Executor to speed up file creation
ExecutorService exec = Executors.newFixedThreadPool(16);
final ExecutorCompletionService<Boolean> completionService =
new ExecutorCompletionService<Boolean>(exec);
try {
final byte[] data = ContractTestUtils.dataset(testBufferSize, 'a', 'z');
for (int i = 0; i < count; ++i) {
final String fileName = "foo-" + i;
completionService.submit(new Callable<Boolean>() {
@Override
public Boolean call() throws IOException {
ContractTestUtils.createFile(fs, new Path(srcDir, fileName),
false, data);
return fs.exists(new Path(srcDir, fileName));
}
});
}
for (int i = 0; i < count; ++i) {
final Future<Boolean> future = completionService.take();
try {
if (!future.get()) {
LOG.warn("cannot create file");
}
} catch (ExecutionException e) {
LOG.warn("Error while uploading file", e.getCause());
throw e;
}
}
} finally {
exec.shutdown();
}
int nSrcFiles = fs.listStatus(srcDir).length;
fs.rename(srcDir, finalDir);
assertEquals(nSrcFiles, fs.listStatus(finalDir).length);
ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
new Path(srcDir, "foo-" + 0));
ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
new Path(srcDir, "foo-" + count / 2));
ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
new Path(srcDir, "foo-" + (count - 1)));
ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
new Path(finalDir, "foo-" + 0));
ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
new Path(finalDir, "foo-" + count/2));
ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
new Path(finalDir, "foo-" + (count-1)));
ContractTestUtils.assertDeleted(fs, finalDir, true, false);
}
@Test
public void testOpenCreate() throws IOException {
Path dir = new Path("/tests3a");
ContractTestUtils.createAndVerifyFile(fs, dir, 1024);
ContractTestUtils.createAndVerifyFile(fs, dir, 5 * 1024 * 1024);
ContractTestUtils.createAndVerifyFile(fs, dir, 20 * 1024 * 1024);
/*
Enable to test the multipart upload
try {
ContractTestUtils.createAndVerifyFile(fs, dir,
(long)6 * 1024 * 1024 * 1024);
} catch (IOException e) {
fail(e.getMessage());
}
*/
}
}
| 4,756 | 35.037879 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3a.scale;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.S3ATestUtils;
import org.junit.After;
import org.junit.Before;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URI;
import static org.junit.Assume.assumeTrue;
/**
* Base class for scale tests; here is where the common scale configuration
 * keys are defined.
*/
public class S3AScaleTestBase {
public static final String SCALE_TEST = "scale.test.";
public static final String KEY_OPERATION_COUNT =
SCALE_TEST + "operation.count";
public static final long DEFAULT_OPERATION_COUNT = 2005;
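  // Illustrative override of the operation count; the value is an assumption. Any
  // configuration resource picked up by createConfiguration() would work, e.g.:
  //
  //   <property>
  //     <name>scale.test.operation.count</name>
  //     <value>500</value>
  //   </property>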
protected S3AFileSystem fs;
private static final Logger LOG =
LoggerFactory.getLogger(S3AScaleTestBase.class);
private Configuration conf;
/**
* Configuration generator. May be overridden to inject
* some custom options
* @return a configuration with which to create FS instances
*/
protected Configuration createConfiguration() {
return new Configuration();
}
/**
* Get the configuration used to set up the FS
* @return the configuration
*/
public Configuration getConf() {
return conf;
}
@Before
public void setUp() throws Exception {
conf = createConfiguration();
fs = S3ATestUtils.createTestFileSystem(conf);
}
@After
public void tearDown() throws Exception {
ContractTestUtils.rm(fs, getTestPath(), true, true);
}
protected Path getTestPath() {
return new Path("/tests3a");
}
protected long getOperationCount() {
return getConf().getLong(KEY_OPERATION_COUNT, DEFAULT_OPERATION_COUNT);
}
}
| 2,628 | 28.211111 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3/Jets3tS3FileSystemContractTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import java.io.IOException;
public class Jets3tS3FileSystemContractTest
extends S3FileSystemContractBaseTest {
@Override
FileSystemStore getFileSystemStore() throws IOException {
return new Jets3tFileSystemStore();
}
}
| 1,078 | 32.71875 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3/InMemoryFileSystemStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URI;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3.INode.FileType;
/**
* A stub implementation of {@link FileSystemStore} for testing
* {@link S3FileSystem} without actually connecting to S3.
*/
public class InMemoryFileSystemStore implements FileSystemStore {
private Configuration conf;
private SortedMap<Path, INode> inodes = new TreeMap<Path, INode>();
private Map<Long, byte[]> blocks = new HashMap<Long, byte[]>();
@Override
public void initialize(URI uri, Configuration conf) {
this.conf = conf;
}
@Override
public String getVersion() throws IOException {
return "0";
}
@Override
public void deleteINode(Path path) throws IOException {
inodes.remove(normalize(path));
}
@Override
public void deleteBlock(Block block) throws IOException {
blocks.remove(block.getId());
}
@Override
public boolean inodeExists(Path path) throws IOException {
return inodes.containsKey(normalize(path));
}
@Override
public boolean blockExists(long blockId) throws IOException {
return blocks.containsKey(blockId);
}
@Override
public INode retrieveINode(Path path) throws IOException {
return inodes.get(normalize(path));
}
@Override
public File retrieveBlock(Block block, long byteRangeStart) throws IOException {
byte[] data = blocks.get(block.getId());
File file = createTempFile();
BufferedOutputStream out = null;
try {
out = new BufferedOutputStream(new FileOutputStream(file));
out.write(data, (int) byteRangeStart, data.length - (int) byteRangeStart);
} finally {
if (out != null) {
out.close();
}
}
return file;
}
private File createTempFile() throws IOException {
File dir = new File(conf.get("fs.s3.buffer.dir"));
if (!dir.exists() && !dir.mkdirs()) {
throw new IOException("Cannot create S3 buffer directory: " + dir);
}
File result = File.createTempFile("test-", ".tmp", dir);
result.deleteOnExit();
return result;
}
@Override
public Set<Path> listSubPaths(Path path) throws IOException {
Path normalizedPath = normalize(path);
// This is inefficient but more than adequate for testing purposes.
Set<Path> subPaths = new LinkedHashSet<Path>();
for (Path p : inodes.tailMap(normalizedPath).keySet()) {
if (normalizedPath.equals(p.getParent())) {
subPaths.add(p);
}
}
return subPaths;
}
@Override
public Set<Path> listDeepSubPaths(Path path) throws IOException {
Path normalizedPath = normalize(path);
String pathString = normalizedPath.toUri().getPath();
if (!pathString.endsWith("/")) {
pathString += "/";
}
// This is inefficient but more than adequate for testing purposes.
Set<Path> subPaths = new LinkedHashSet<Path>();
for (Path p : inodes.tailMap(normalizedPath).keySet()) {
if (p.toUri().getPath().startsWith(pathString)) {
subPaths.add(p);
}
}
return subPaths;
}
@Override
public void storeINode(Path path, INode inode) throws IOException {
inodes.put(normalize(path), inode);
}
@Override
public void storeBlock(Block block, File file) throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
byte[] buf = new byte[8192];
int numRead;
BufferedInputStream in = null;
try {
in = new BufferedInputStream(new FileInputStream(file));
while ((numRead = in.read(buf)) >= 0) {
out.write(buf, 0, numRead);
}
} finally {
if (in != null) {
in.close();
}
}
blocks.put(block.getId(), out.toByteArray());
}
private Path normalize(Path path) {
if (!path.isAbsolute()) {
throw new IllegalArgumentException("Path must be absolute: " + path);
}
return new Path(path.toUri().getPath());
}
@Override
public void purge() throws IOException {
inodes.clear();
blocks.clear();
}
@Override
public void dump() throws IOException {
StringBuilder sb = new StringBuilder(getClass().getSimpleName());
sb.append(", \n");
for (Map.Entry<Path, INode> entry : inodes.entrySet()) {
sb.append(entry.getKey()).append("\n");
INode inode = entry.getValue();
sb.append("\t").append(inode.getFileType()).append("\n");
if (inode.getFileType() == FileType.DIRECTORY) {
continue;
}
for (int j = 0; j < inode.getBlocks().length; j++) {
sb.append("\t").append(inode.getBlocks()[j]).append("\n");
}
}
System.out.println(sb);
System.out.println(inodes.keySet());
System.out.println(blocks.keySet());
}
}
| 5,950 | 28.755 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3/S3InMemoryFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import org.apache.hadoop.fs.s3.S3FileSystem;
import org.apache.hadoop.fs.s3.InMemoryFileSystemStore;
/**
* A helper implementation of {@link S3FileSystem}
* without actually connecting to S3 for unit testing.
*/
public class S3InMemoryFileSystem extends S3FileSystem {
public S3InMemoryFileSystem() {
super(new InMemoryFileSystemStore());
}
}
| 1,205 | 35.545455 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3/TestINode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import java.io.IOException;
import java.io.InputStream;
import junit.framework.TestCase;
import org.apache.hadoop.fs.s3.INode.FileType;
public class TestINode extends TestCase {
public void testSerializeFileWithSingleBlock() throws IOException {
Block[] blocks = { new Block(849282477840258181L, 128L) };
INode inode = new INode(FileType.FILE, blocks);
assertEquals("Length", 1L + 4 + 16, inode.getSerializedLength());
InputStream in = inode.serialize();
INode deserialized = INode.deserialize(in);
assertEquals("FileType", inode.getFileType(), deserialized.getFileType());
Block[] deserializedBlocks = deserialized.getBlocks();
assertEquals("Length", 1, deserializedBlocks.length);
assertEquals("Id", blocks[0].getId(), deserializedBlocks[0].getId());
assertEquals("Length", blocks[0].getLength(), deserializedBlocks[0]
.getLength());
}
public void testSerializeDirectory() throws IOException {
INode inode = INode.DIRECTORY_INODE;
assertEquals("Length", 1L, inode.getSerializedLength());
InputStream in = inode.serialize();
INode deserialized = INode.deserialize(in);
assertSame(INode.DIRECTORY_INODE, deserialized);
}
public void testDeserializeNull() throws IOException {
assertNull(INode.deserialize(null));
}
}
| 2,171 | 34.606557 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3/TestS3InMemoryFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
public class TestS3InMemoryFileSystem extends TestCase {
private static final String TEST_PATH = "s3://test/data.txt";
private static final String TEST_DATA = "Sample data for testing.";
private S3InMemoryFileSystem fs;
@Override
public void setUp() throws IOException {
fs = new S3InMemoryFileSystem();
fs.initialize(URI.create("s3://test/"), new Configuration());
}
public void testBasicReadWriteIO() throws IOException {
FSDataOutputStream writeStream = fs.create(new Path(TEST_PATH));
writeStream.write(TEST_DATA.getBytes());
writeStream.flush();
writeStream.close();
FSDataInputStream readStream = fs.open(new Path(TEST_PATH));
BufferedReader br = new BufferedReader(new InputStreamReader(readStream));
String line = "";
StringBuffer stringBuffer = new StringBuffer();
while ((line = br.readLine()) != null) {
stringBuffer.append(line);
}
br.close();
    assertEquals(TEST_DATA, stringBuffer.toString());
}
@Override
public void tearDown() throws IOException {
fs.close();
}
}
| 2,260 | 32.25 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystemContract.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import java.io.IOException;
public class TestInMemoryS3FileSystemContract
extends S3FileSystemContractBaseTest {
@Override
FileSystemStore getFileSystemStore() throws IOException {
return new InMemoryFileSystemStore();
}
}
| 1,082 | 32.84375 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3/TestS3FileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import java.io.IOException;
import java.net.URI;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
public class TestS3FileSystem extends TestCase {
public void testInitialization() throws IOException {
initializationTest("s3://a:b@c", "s3://a:b@c");
initializationTest("s3://a:b@c/", "s3://a:b@c");
initializationTest("s3://a:b@c/path", "s3://a:b@c");
initializationTest("s3://a@c", "s3://a@c");
initializationTest("s3://a@c/", "s3://a@c");
initializationTest("s3://a@c/path", "s3://a@c");
initializationTest("s3://c", "s3://c");
initializationTest("s3://c/", "s3://c");
initializationTest("s3://c/path", "s3://c");
}
private void initializationTest(String initializationUri, String expectedUri)
throws IOException {
S3FileSystem fs = new S3FileSystem(new InMemoryFileSystemStore());
fs.initialize(URI.create(initializationUri), new Configuration());
assertEquals(URI.create(expectedUri), fs.getUri());
}
}
| 1,850 | 35.294118 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3/TestS3Credentials.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.ProviderUtils;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;
import java.io.File;
import java.net.URI;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.junit.rules.TestName;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
public class TestS3Credentials {
public static final Log LOG = LogFactory.getLog(TestS3Credentials.class);
@Rule
public final TestName test = new TestName();
@Before
public void announce() {
LOG.info("Running test " + test.getMethodName());
}
private static final String EXAMPLE_ID = "AKASOMEACCESSKEY";
private static final String EXAMPLE_KEY =
"RGV0cm9pdCBSZ/WQgY2xl/YW5lZCB1cAEXAMPLE";
@Test
public void testInvalidHostnameWithUnderscores() throws Exception {
S3Credentials s3Credentials = new S3Credentials();
try {
s3Credentials.initialize(new URI("s3://a:b@c_d"), new Configuration());
fail("Should throw IllegalArgumentException");
} catch (IllegalArgumentException e) {
assertEquals("Invalid hostname in URI s3://a:b@c_d", e.getMessage());
}
}
@Test
public void testPlaintextConfigPassword() throws Exception {
S3Credentials s3Credentials = new S3Credentials();
Configuration conf = new Configuration();
conf.set("fs.s3.awsAccessKeyId", EXAMPLE_ID);
conf.set("fs.s3.awsSecretAccessKey", EXAMPLE_KEY);
s3Credentials.initialize(new URI("s3://foobar"), conf);
assertEquals("Could not retrieve proper access key", EXAMPLE_ID,
s3Credentials.getAccessKey());
assertEquals("Could not retrieve proper secret", EXAMPLE_KEY,
s3Credentials.getSecretAccessKey());
}
@Test
public void testPlaintextConfigPasswordWithWhitespace() throws Exception {
S3Credentials s3Credentials = new S3Credentials();
Configuration conf = new Configuration();
conf.set("fs.s3.awsAccessKeyId", "\r\n " + EXAMPLE_ID +
" \r\n");
conf.set("fs.s3.awsSecretAccessKey", "\r\n " + EXAMPLE_KEY +
" \r\n");
s3Credentials.initialize(new URI("s3://foobar"), conf);
assertEquals("Could not retrieve proper access key", EXAMPLE_ID,
s3Credentials.getAccessKey());
assertEquals("Could not retrieve proper secret", EXAMPLE_KEY,
s3Credentials.getSecretAccessKey());
}
@Rule
public final TemporaryFolder tempDir = new TemporaryFolder();
@Test
public void testCredentialProvider() throws Exception {
// set up conf to have a cred provider
final Configuration conf = new Configuration();
final File file = tempDir.newFile("test.jks");
final URI jks = ProviderUtils.nestURIForLocalJavaKeyStoreProvider(
file.toURI());
conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
jks.toString());
// add our creds to the provider
final CredentialProvider provider =
CredentialProviderFactory.getProviders(conf).get(0);
provider.createCredentialEntry("fs.s3.awsSecretAccessKey",
EXAMPLE_KEY.toCharArray());
provider.flush();
// make sure S3Creds can retrieve things.
S3Credentials s3Credentials = new S3Credentials();
conf.set("fs.s3.awsAccessKeyId", EXAMPLE_ID);
s3Credentials.initialize(new URI("s3://foobar"), conf);
assertEquals("Could not retrieve proper access key", EXAMPLE_ID,
s3Credentials.getAccessKey());
assertEquals("Could not retrieve proper secret", EXAMPLE_KEY,
s3Credentials.getSecretAccessKey());
}
@Test(expected=IllegalArgumentException.class)
public void noSecretShouldThrow() throws Exception {
S3Credentials s3Credentials = new S3Credentials();
Configuration conf = new Configuration();
conf.set("fs.s3.awsAccessKeyId", EXAMPLE_ID);
s3Credentials.initialize(new URI("s3://foobar"), conf);
}
@Test(expected=IllegalArgumentException.class)
public void noAccessIdShouldThrow() throws Exception {
S3Credentials s3Credentials = new S3Credentials();
Configuration conf = new Configuration();
conf.set("fs.s3.awsSecretAccessKey", EXAMPLE_KEY);
s3Credentials.initialize(new URI("s3://foobar"), conf);
}
}
| 5,246 | 37.021739 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import java.io.IOException;
import java.net.URI;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.junit.internal.AssumptionViolatedException;
public abstract class S3FileSystemContractBaseTest
extends FileSystemContractBaseTest {
public static final String KEY_TEST_FS = "test.fs.s3.name";
private FileSystemStore store;
abstract FileSystemStore getFileSystemStore() throws IOException;
@Override
protected void setUp() throws Exception {
Configuration conf = new Configuration();
store = getFileSystemStore();
fs = new S3FileSystem(store);
String fsname = conf.get(KEY_TEST_FS);
if (StringUtils.isEmpty(fsname)) {
throw new AssumptionViolatedException(
"No test FS defined in :" + KEY_TEST_FS);
}
fs.initialize(URI.create(fsname), conf);
}
@Override
protected void tearDown() throws Exception {
store.purge();
super.tearDown();
}
public void testCanonicalName() throws Exception {
assertNull("s3 doesn't support security token and shouldn't have canonical name",
fs.getCanonicalServiceName());
}
}
| 2,059 | 32.225806 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/S3AContract.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3a;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
/**
* The contract of S3A: only enabled if the test bucket is provided
*/
public class S3AContract extends AbstractBondedFSContract {
public static final String CONTRACT_XML = "contract/s3a.xml";
public S3AContract(Configuration conf) {
super(conf);
//insert the base features
addConfResource(CONTRACT_XML);
}
@Override
public String getScheme() {
return "s3a";
}
}
| 1,374 | 30.25 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractSeek.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3a;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestS3AContractSeek extends AbstractContractSeekTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf);
}
}
| 1,219 | 37.125 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractRename.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3a;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.junit.Test;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset;
public class TestS3AContractRename extends AbstractContractRenameTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf);
}
@Override
public void testRenameDirIntoExistingDir() throws Throwable {
describe("Verify renaming a dir into an existing dir puts the files"
+" from the source dir into the existing dir"
+" and leaves existing files alone");
FileSystem fs = getFileSystem();
String sourceSubdir = "source";
Path srcDir = path(sourceSubdir);
Path srcFilePath = new Path(srcDir, "source-256.txt");
byte[] srcDataset = dataset(256, 'a', 'z');
writeDataset(fs, srcFilePath, srcDataset, srcDataset.length, 1024, false);
Path destDir = path("dest");
Path destFilePath = new Path(destDir, "dest-512.txt");
byte[] destDateset = dataset(512, 'A', 'Z');
writeDataset(fs, destFilePath, destDateset, destDateset.length, 1024,
false);
assertIsFile(destFilePath);
boolean rename = fs.rename(srcDir, destDir);
assertFalse("s3a doesn't support rename to non-empty directory", rename);
}
}
| 2,479 | 39 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractDelete.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3a;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestS3AContractDelete extends AbstractContractDeleteTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf);
}
}
| 1,225 | 37.3125 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractMkdir.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3a;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
/**
* Test dir operations on S3
*/
public class TestS3AContractMkdir extends AbstractContractMkdirTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf);
}
}
| 1,259 | 35 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractOpen.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3a;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestS3AContractOpen extends AbstractContractOpenTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf);
}
}
| 1,219 | 37.125 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractCreate.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3a;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.ContractTestUtils;
public class TestS3AContractCreate extends AbstractContractCreateTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf);
}
@Override
public void testOverwriteEmptyDirectory() throws Throwable {
ContractTestUtils.skip(
"blobstores can't distinguish empty directories from files");
}
}
| 1,459 | 36.435897 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractRootDir.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3a;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
/**
* root dir operations against an S3 bucket
*/
public class TestS3AContractRootDir extends
AbstractContractRootDirectoryTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf);
}
}
| 1,296 | 35.027778 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3n/TestS3NContractMkdir.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3n;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
/**
* Test dir operations on S3
*/
public class TestS3NContractMkdir extends AbstractContractMkdirTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new NativeS3Contract(conf);
}
}
| 1,264 | 35.142857 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3n/TestS3NContractDelete.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3n;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestS3NContractDelete extends AbstractContractDeleteTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new NativeS3Contract(conf);
}
}
| 1,230 | 37.46875 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3n/TestS3NContractOpen.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3n;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestS3NContractOpen extends AbstractContractOpenTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new NativeS3Contract(conf);
}
}
| 1,224 | 37.28125 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3n/TestS3NContractRootDir.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3n;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
/**
* root dir operations against an S3 bucket
*/
public class TestS3NContractRootDir extends
AbstractContractRootDirectoryTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new NativeS3Contract(conf);
}
}
| 1,301 | 35.166667 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3n/NativeS3Contract.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3n;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
/**
* The contract of S3N: only enabled if the test bucket is provided
*/
public class NativeS3Contract extends AbstractBondedFSContract {
public static final String CONTRACT_XML = "contract/s3n.xml";
public NativeS3Contract(Configuration conf) {
super(conf);
//insert the base features
addConfResource(CONTRACT_XML);
}
@Override
public String getScheme() {
return "s3n";
}
}
| 1,384 | 30.477273 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3n/TestS3NContractSeek.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3n;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestS3NContractSeek extends AbstractContractSeekTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new NativeS3Contract(conf);
}
}
| 1,224 | 37.28125 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3n/TestS3NContractCreate.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3n;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.ContractTestUtils;
public class TestS3NContractCreate extends AbstractContractCreateTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new NativeS3Contract(conf);
}
@Override
public void testOverwriteEmptyDirectory() throws Throwable {
ContractTestUtils.skip(
"blobstores can't distinguish empty directories from files");
}
}
| 1,464 | 36.564103 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3n/TestS3NContractRename.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3n;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestS3NContractRename extends AbstractContractRenameTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new NativeS3Contract(conf);
}
}
| 1,231 | 36.333333 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestJets3tNativeFileSystemStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3native;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import static org.junit.Assert.*;
import static org.junit.Assume.*;
import org.junit.Before;
import org.junit.After;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.security.DigestInputStream;
import java.security.DigestOutputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
public class TestJets3tNativeFileSystemStore {
private Configuration conf;
private Jets3tNativeFileSystemStore store;
private NativeS3FileSystem fs;
@Before
public void setUp() throws Exception {
conf = new Configuration();
store = new Jets3tNativeFileSystemStore();
fs = new NativeS3FileSystem(store);
conf.setBoolean("fs.s3n.multipart.uploads.enabled", true);
conf.setLong("fs.s3n.multipart.uploads.block.size", 64 * 1024 * 1024);
fs.initialize(URI.create(conf.get("test.fs.s3n.name")), conf);
}
@After
public void tearDown() throws Exception {
try {
store.purge("test");
    } catch (Exception e) {
      // best-effort cleanup: ignore purge failures so teardown never masks a test result
    }
}
@BeforeClass
public static void checkSettings() throws Exception {
Configuration conf = new Configuration();
assumeNotNull(conf.get("fs.s3n.awsAccessKeyId"));
assumeNotNull(conf.get("fs.s3n.awsSecretAccessKey"));
assumeNotNull(conf.get("test.fs.s3n.name"));
}
protected void writeRenameReadCompare(Path path, long len)
throws IOException, NoSuchAlgorithmException {
// If len > fs.s3n.multipart.uploads.block.size,
// we'll use a multipart upload copy
MessageDigest digest = MessageDigest.getInstance("MD5");
OutputStream out = new BufferedOutputStream(
new DigestOutputStream(fs.create(path, false), digest));
for (long i = 0; i < len; i++) {
out.write('Q');
}
out.flush();
out.close();
assertTrue("Exists", fs.exists(path));
// Depending on if this file is over 5 GB or not,
// rename will cause a multipart upload copy
Path copyPath = path.suffix(".copy");
fs.rename(path, copyPath);
assertTrue("Copy exists", fs.exists(copyPath));
// Download file from S3 and compare the digest against the original
MessageDigest digest2 = MessageDigest.getInstance("MD5");
InputStream in = new BufferedInputStream(
new DigestInputStream(fs.open(copyPath), digest2));
long copyLen = 0;
while (in.read() != -1) {copyLen++;}
in.close();
assertEquals("Copy length matches original", len, copyLen);
assertArrayEquals("Digests match", digest.digest(), digest2.digest());
}
@Test
public void testSmallUpload() throws IOException, NoSuchAlgorithmException {
// Regular upload, regular copy
writeRenameReadCompare(new Path("/test/small"), 16384);
}
@Test
public void testMediumUpload() throws IOException, NoSuchAlgorithmException {
// Multipart upload, regular copy
writeRenameReadCompare(new Path("/test/medium"), 33554432); // 100 MB
}
/*
Enable Multipart upload to run this test
@Test
public void testExtraLargeUpload()
throws IOException, NoSuchAlgorithmException {
// Multipart upload, multipart copy
writeRenameReadCompare(new Path("/test/xlarge"), 5368709121L); // 5GB+1byte
}
*/
}
| 4,276 | 31.9 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3native;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3native.NativeS3FileSystem.NativeS3FsInputStream;
import org.junit.internal.AssumptionViolatedException;
public abstract class NativeS3FileSystemContractBaseTest
extends FileSystemContractBaseTest {
public static final String KEY_TEST_FS = "test.fs.s3n.name";
private NativeFileSystemStore store;
abstract NativeFileSystemStore getNativeFileSystemStore() throws IOException;
@Override
protected void setUp() throws Exception {
Configuration conf = new Configuration();
store = getNativeFileSystemStore();
fs = new NativeS3FileSystem(store);
String fsname = conf.get(KEY_TEST_FS);
if (StringUtils.isEmpty(fsname)) {
throw new AssumptionViolatedException(
"No test FS defined in :" + KEY_TEST_FS);
}
fs.initialize(URI.create(fsname), conf);
}
@Override
protected void tearDown() throws Exception {
store.purge("test");
super.tearDown();
}
public void testCanonicalName() throws Exception {
assertNull("s3n doesn't support security token and shouldn't have canonical name",
fs.getCanonicalServiceName());
}
public void testListStatusForRoot() throws Exception {
FileStatus[] paths = fs.listStatus(path("/"));
assertEquals(0, paths.length);
Path testDir = path("/test");
assertTrue(fs.mkdirs(testDir));
paths = fs.listStatus(path("/"));
assertEquals(1, paths.length);
assertEquals(path("/test"), paths[0].getPath());
}
public void testNoTrailingBackslashOnBucket() throws Exception {
assertTrue(fs.getFileStatus(new Path(fs.getUri().toString())).isDirectory());
}
private void createTestFiles(String base) throws IOException {
store.storeEmptyFile(base + "/file1");
store.storeEmptyFile(base + "/dir/file2");
store.storeEmptyFile(base + "/dir/file3");
}
public void testDirWithDifferentMarkersWorks() throws Exception {
    for (int i = 0; i < 4; i++) {  // includes the i == 3 "both markers" case below
String base = "test/hadoop" + i;
Path path = path("/" + base);
createTestFiles(base);
if (i == 0 ) {
//do nothing, we are testing correctness with no markers
}
else if (i == 1) {
// test for _$folder$ marker
store.storeEmptyFile(base + "_$folder$");
store.storeEmptyFile(base + "/dir_$folder$");
}
else if (i == 2) {
// test the end slash file marker
store.storeEmptyFile(base + "/");
store.storeEmptyFile(base + "/dir/");
}
else if (i == 3) {
// test both markers
store.storeEmptyFile(base + "_$folder$");
store.storeEmptyFile(base + "/dir_$folder$");
store.storeEmptyFile(base + "/");
store.storeEmptyFile(base + "/dir/");
}
assertTrue(fs.getFileStatus(path).isDirectory());
assertEquals(2, fs.listStatus(path).length);
}
}
public void testDeleteWithNoMarker() throws Exception {
String base = "test/hadoop";
Path path = path("/" + base);
createTestFiles(base);
fs.delete(path, true);
path = path("/test");
assertTrue(fs.getFileStatus(path).isDirectory());
assertEquals(0, fs.listStatus(path).length);
}
public void testRenameWithNoMarker() throws Exception {
String base = "test/hadoop";
Path dest = path("/test/hadoop2");
createTestFiles(base);
fs.rename(path("/" + base), dest);
Path path = path("/test");
assertTrue(fs.getFileStatus(path).isDirectory());
assertEquals(1, fs.listStatus(path).length);
assertTrue(fs.getFileStatus(dest).isDirectory());
assertEquals(2, fs.listStatus(dest).length);
}
public void testEmptyFile() throws Exception {
store.storeEmptyFile("test/hadoop/file1");
fs.open(path("/test/hadoop/file1")).close();
}
public void testBlockSize() throws Exception {
Path file = path("/test/hadoop/file");
createFile(file);
assertEquals("Default block size", fs.getDefaultBlockSize(file),
fs.getFileStatus(file).getBlockSize());
// Block size is determined at read time
long newBlockSize = fs.getDefaultBlockSize(file) * 2;
fs.getConf().setLong("fs.s3n.block.size", newBlockSize);
assertEquals("Double default block size", newBlockSize,
fs.getFileStatus(file).getBlockSize());
}
public void testRetryOnIoException() throws Exception {
class TestInputStream extends InputStream {
boolean shouldThrow = true;
int throwCount = 0;
int pos = 0;
byte[] bytes;
boolean threwException = false;
public TestInputStream() {
bytes = new byte[256];
for (int i = pos; i < 256; i++) {
bytes[i] = (byte)i;
}
}
@Override
public int read() throws IOException {
shouldThrow = !shouldThrow;
if (shouldThrow) {
throwCount++;
threwException = true;
throw new IOException();
}
assertFalse("IOException was thrown. InputStream should be reopened", threwException);
return pos++;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
shouldThrow = !shouldThrow;
if (shouldThrow) {
throwCount++;
threwException = true;
throw new IOException();
}
assertFalse("IOException was thrown. InputStream should be reopened", threwException);
int sizeToRead = Math.min(len, 256 - pos);
for (int i = 0; i < sizeToRead; i++) {
b[i] = bytes[pos + i];
}
pos += sizeToRead;
return sizeToRead;
}
public void reopenAt(long byteRangeStart) {
threwException = false;
pos = Long.valueOf(byteRangeStart).intValue();
}
}
final TestInputStream is = new TestInputStream();
class MockNativeFileSystemStore extends Jets3tNativeFileSystemStore {
@Override
public InputStream retrieve(String key, long byteRangeStart) throws IOException {
is.reopenAt(byteRangeStart);
return is;
}
}
NativeS3FsInputStream stream = new NativeS3FsInputStream(new MockNativeFileSystemStore(), null, is, "");
// Test reading methods.
byte[] result = new byte[256];
for (int i = 0; i < 128; i++) {
result[i] = (byte)stream.read();
}
for (int i = 128; i < 256; i += 8) {
byte[] temp = new byte[8];
int read = stream.read(temp, 0, 8);
assertEquals(8, read);
System.arraycopy(temp, 0, result, i, 8);
}
// Assert correct
for (int i = 0; i < 256; i++) {
assertEquals((byte)i, result[i]);
}
// Test to make sure the throw path was exercised.
// every read should have thrown 1 IOException except for the first read
    // 143 = (128 - 1) + (128 / 8)
assertEquals(143, ((TestInputStream)is).throwCount);
}
}
| 8,012 | 30.671937 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3NInMemoryFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3native;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
public class TestS3NInMemoryFileSystem extends TestCase {
private static final String TEST_PATH = "s3n://test/data.txt";
private static final String TEST_DATA = "Sample data for testing.";
private S3NInMemoryFileSystem fs;
@Override
public void setUp() throws IOException {
fs = new S3NInMemoryFileSystem();
fs.initialize(URI.create("s3n://test/"), new Configuration());
}
public void testBasicReadWriteIO() throws IOException {
FSDataOutputStream writeData = fs.create(new Path(TEST_PATH));
writeData.write(TEST_DATA.getBytes());
writeData.flush();
writeData.close();
FSDataInputStream readData = fs.open(new Path(TEST_PATH));
BufferedReader br = new BufferedReader(new InputStreamReader(readData));
String line = "";
StringBuffer stringBuffer = new StringBuffer();
while ((line = br.readLine()) != null) {
stringBuffer.append(line);
}
br.close();
    assertEquals(TEST_DATA, stringBuffer.toString());
}
@Override
public void tearDown() throws IOException {
fs.close();
}
}
| 2,261 | 31.314286 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/S3NInMemoryFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3native;
import org.apache.hadoop.fs.s3native.NativeS3FileSystem;
import org.apache.hadoop.fs.s3native.InMemoryNativeFileSystemStore;
/**
* A helper implementation of {@link NativeS3FileSystem}
* without actually connecting to S3 for unit testing.
*/
public class S3NInMemoryFileSystem extends NativeS3FileSystem {
public S3NInMemoryFileSystem() {
super(new InMemoryNativeFileSystemStore());
}
}
| 1,255 | 37.060606 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/Jets3tNativeS3FileSystemContractTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3native;
import java.io.IOException;
public class Jets3tNativeS3FileSystemContractTest
extends NativeS3FileSystemContractBaseTest {
@Override
NativeFileSystemStore getNativeFileSystemStore() throws IOException {
return new Jets3tNativeFileSystemStore();
}
}
| 1,113 | 34.935484 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestInMemoryNativeS3FileSystemContract.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3native;
import java.io.IOException;
public class TestInMemoryNativeS3FileSystemContract
extends NativeS3FileSystemContractBaseTest {
@Override
NativeFileSystemStore getNativeFileSystemStore() throws IOException {
return new InMemoryNativeFileSystemStore();
}
}
| 1,115 | 35 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/InMemoryNativeFileSystemStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3native;
import static org.apache.hadoop.fs.s3native.NativeS3FileSystem.PATH_DELIMITER;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.Map.Entry;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Time;
/**
* <p>
* A stub implementation of {@link NativeFileSystemStore} for testing
* {@link NativeS3FileSystem} without actually connecting to S3.
* </p>
*/
public class InMemoryNativeFileSystemStore implements NativeFileSystemStore {
private Configuration conf;
private SortedMap<String, FileMetadata> metadataMap =
new TreeMap<String, FileMetadata>();
private SortedMap<String, byte[]> dataMap = new TreeMap<String, byte[]>();
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
this.conf = conf;
}
@Override
public void storeEmptyFile(String key) throws IOException {
metadataMap.put(key, new FileMetadata(key, 0, Time.now()));
dataMap.put(key, new byte[0]);
}
@Override
public void storeFile(String key, File file, byte[] md5Hash)
throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
byte[] buf = new byte[8192];
int numRead;
BufferedInputStream in = null;
try {
in = new BufferedInputStream(new FileInputStream(file));
while ((numRead = in.read(buf)) >= 0) {
out.write(buf, 0, numRead);
}
} finally {
if (in != null) {
in.close();
}
}
metadataMap.put(key,
new FileMetadata(key, file.length(), Time.now()));
dataMap.put(key, out.toByteArray());
}
@Override
public InputStream retrieve(String key) throws IOException {
return retrieve(key, 0);
}
@Override
public InputStream retrieve(String key, long byteRangeStart)
throws IOException {
byte[] data = dataMap.get(key);
File file = createTempFile();
BufferedOutputStream out = null;
try {
out = new BufferedOutputStream(new FileOutputStream(file));
out.write(data, (int) byteRangeStart,
data.length - (int) byteRangeStart);
} finally {
if (out != null) {
out.close();
}
}
return new FileInputStream(file);
}
private File createTempFile() throws IOException {
File dir = new File(conf.get("fs.s3.buffer.dir"));
if (!dir.exists() && !dir.mkdirs()) {
throw new IOException("Cannot create S3 buffer directory: " + dir);
}
File result = File.createTempFile("test-", ".tmp", dir);
result.deleteOnExit();
return result;
}
@Override
public FileMetadata retrieveMetadata(String key) throws IOException {
return metadataMap.get(key);
}
@Override
public PartialListing list(String prefix, int maxListingLength)
throws IOException {
return list(prefix, maxListingLength, null, false);
}
@Override
public PartialListing list(String prefix, int maxListingLength,
String priorLastKey, boolean recursive) throws IOException {
return list(prefix, recursive ? null : PATH_DELIMITER, maxListingLength, priorLastKey);
}
private PartialListing list(String prefix, String delimiter,
int maxListingLength, String priorLastKey) throws IOException {
if (prefix.length() > 0 && !prefix.endsWith(PATH_DELIMITER)) {
prefix += PATH_DELIMITER;
}
List<FileMetadata> metadata = new ArrayList<FileMetadata>();
SortedSet<String> commonPrefixes = new TreeSet<String>();
    for (String key : dataMap.keySet()) {
      if (priorLastKey != null && key.compareTo(priorLastKey) <= 0) {
        continue; // resume after the last key returned in the previous page
      }
if (key.startsWith(prefix)) {
if (delimiter == null) {
metadata.add(retrieveMetadata(key));
} else {
int delimIndex = key.indexOf(delimiter, prefix.length());
if (delimIndex == -1) {
metadata.add(retrieveMetadata(key));
} else {
String commonPrefix = key.substring(0, delimIndex);
commonPrefixes.add(commonPrefix);
}
}
}
      if (metadata.size() + commonPrefixes.size() == maxListingLength) {
        // page is full: return it along with the key to resume from
        return new PartialListing(key, metadata.toArray(new FileMetadata[0]),
            commonPrefixes.toArray(new String[0]));
      }
}
return new PartialListing(null, metadata.toArray(new FileMetadata[0]),
commonPrefixes.toArray(new String[0]));
}
@Override
public void delete(String key) throws IOException {
metadataMap.remove(key);
dataMap.remove(key);
}
@Override
public void copy(String srcKey, String dstKey) throws IOException {
metadataMap.put(dstKey, metadataMap.get(srcKey));
dataMap.put(dstKey, dataMap.get(srcKey));
}
@Override
public void purge(String prefix) throws IOException {
Iterator<Entry<String, FileMetadata>> i =
metadataMap.entrySet().iterator();
while (i.hasNext()) {
Entry<String, FileMetadata> entry = i.next();
if (entry.getKey().startsWith(prefix)) {
dataMap.remove(entry.getKey());
i.remove();
}
}
}
@Override
public void dump() throws IOException {
System.out.println(metadataMap.values());
System.out.println(dataMap.keySet());
}
}
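A hedged usage sketch of the stub above, kept in the same package so it can reuse the file's imports. The bucket name and buffer directory are placeholders; the buffer dir only matters for retrieve(), which spools data through a temp file.
class InMemoryStoreUsageSketch {
  static PartialListing demo() throws IOException {
    Configuration conf = new Configuration();
    conf.set("fs.s3.buffer.dir", System.getProperty("java.io.tmpdir")); // placeholder dir
    InMemoryNativeFileSystemStore store = new InMemoryNativeFileSystemStore();
    store.initialize(URI.create("s3n://example-bucket/"), conf); // bucket name is illustrative
    store.storeEmptyFile("test/dir/file1");
    // with the "/" delimiter, "test/dir" comes back as a common prefix (a pseudo-directory)
    return store.list("test", 100);
  }
}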
| 6,355 | 29.705314 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3a;
public class Constants {
// s3 access key
public static final String ACCESS_KEY = "fs.s3a.access.key";
// s3 secret key
public static final String SECRET_KEY = "fs.s3a.secret.key";
// number of simultaneous connections to s3
public static final String MAXIMUM_CONNECTIONS = "fs.s3a.connection.maximum";
public static final int DEFAULT_MAXIMUM_CONNECTIONS = 15;
// connect to s3 over ssl?
public static final String SECURE_CONNECTIONS = "fs.s3a.connection.ssl.enabled";
public static final boolean DEFAULT_SECURE_CONNECTIONS = true;
//use a custom endpoint?
public static final String ENDPOINT = "fs.s3a.endpoint";
//connect to s3 through a proxy server?
public static final String PROXY_HOST = "fs.s3a.proxy.host";
public static final String PROXY_PORT = "fs.s3a.proxy.port";
public static final String PROXY_USERNAME = "fs.s3a.proxy.username";
public static final String PROXY_PASSWORD = "fs.s3a.proxy.password";
public static final String PROXY_DOMAIN = "fs.s3a.proxy.domain";
public static final String PROXY_WORKSTATION = "fs.s3a.proxy.workstation";
// number of times we should retry errors
public static final String MAX_ERROR_RETRIES = "fs.s3a.attempts.maximum";
public static final int DEFAULT_MAX_ERROR_RETRIES = 10;
  // milliseconds until we give up trying to establish a connection to s3
public static final String ESTABLISH_TIMEOUT = "fs.s3a.connection.establish.timeout";
public static final int DEFAULT_ESTABLISH_TIMEOUT = 50000;
  // milliseconds until we give up on a connection to s3
public static final String SOCKET_TIMEOUT = "fs.s3a.connection.timeout";
public static final int DEFAULT_SOCKET_TIMEOUT = 50000;
// number of records to get while paging through a directory listing
public static final String MAX_PAGING_KEYS = "fs.s3a.paging.maximum";
public static final int DEFAULT_MAX_PAGING_KEYS = 5000;
// the maximum number of threads to allow in the pool used by TransferManager
public static final String MAX_THREADS = "fs.s3a.threads.max";
public static final int DEFAULT_MAX_THREADS = 256;
// the number of threads to keep in the pool used by TransferManager
public static final String CORE_THREADS = "fs.s3a.threads.core";
public static final int DEFAULT_CORE_THREADS = DEFAULT_MAXIMUM_CONNECTIONS;
// when the number of threads is greater than the core, this is the maximum time
// that excess idle threads will wait for new tasks before terminating.
public static final String KEEPALIVE_TIME = "fs.s3a.threads.keepalivetime";
public static final int DEFAULT_KEEPALIVE_TIME = 60;
// the maximum number of tasks that the LinkedBlockingQueue can hold
public static final String MAX_TOTAL_TASKS = "fs.s3a.max.total.tasks";
public static final int DEFAULT_MAX_TOTAL_TASKS = 1000;
  // size of each multipart piece, in bytes
public static final String MULTIPART_SIZE = "fs.s3a.multipart.size";
public static final long DEFAULT_MULTIPART_SIZE = 104857600; // 100 MB
  // minimum size in bytes before we start a multipart upload or copy
public static final String MIN_MULTIPART_THRESHOLD = "fs.s3a.multipart.threshold";
public static final long DEFAULT_MIN_MULTIPART_THRESHOLD = Integer.MAX_VALUE;
// comma separated list of directories
public static final String BUFFER_DIR = "fs.s3a.buffer.dir";
// should we upload directly from memory rather than using a file buffer
public static final String FAST_UPLOAD = "fs.s3a.fast.upload";
public static final boolean DEFAULT_FAST_UPLOAD = false;
//initial size of memory buffer for a fast upload
public static final String FAST_BUFFER_SIZE = "fs.s3a.fast.buffer.size";
public static final int DEFAULT_FAST_BUFFER_SIZE = 1048576; //1MB
// private | public-read | public-read-write | authenticated-read |
// log-delivery-write | bucket-owner-read | bucket-owner-full-control
public static final String CANNED_ACL = "fs.s3a.acl.default";
public static final String DEFAULT_CANNED_ACL = "";
// should we try to purge old multipart uploads when starting up
public static final String PURGE_EXISTING_MULTIPART = "fs.s3a.multipart.purge";
public static final boolean DEFAULT_PURGE_EXISTING_MULTIPART = false;
// purge any multipart uploads older than this number of seconds
public static final String PURGE_EXISTING_MULTIPART_AGE = "fs.s3a.multipart.purge.age";
public static final long DEFAULT_PURGE_EXISTING_MULTIPART_AGE = 14400;
// s3 server-side encryption
public static final String SERVER_SIDE_ENCRYPTION_ALGORITHM =
"fs.s3a.server-side-encryption-algorithm";
//override signature algorithm used for signing requests
public static final String SIGNING_ALGORITHM = "fs.s3a.signing-algorithm";
public static final String S3N_FOLDER_SUFFIX = "_$folder$";
public static final String FS_S3A_BLOCK_SIZE = "fs.s3a.block.size";
public static final String FS_S3A = "s3a";
}
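A hedged sketch of feeding a few of these keys into a Configuration before opening an s3a filesystem. The bucket URI and credential values are placeholders, and in real deployments the keys usually come from core-site.xml or instance profiles rather than code; this is illustration, not the canonical setup.
// standalone sketch; would live in its own source file alongside Constants
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.Constants;
public class S3AConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(Constants.ACCESS_KEY, "<access-key>");            // placeholder
    conf.set(Constants.SECRET_KEY, "<secret-key>");            // placeholder
    conf.setInt(Constants.MAXIMUM_CONNECTIONS, 30);
    conf.setLong(Constants.MULTIPART_SIZE, 64 * 1024 * 1024);  // 64 MB parts
    FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
    System.out.println("root exists: " + fs.exists(new Path("/")));
    fs.close();
  }
}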
| 5,750 | 46.139344 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3a;
import com.amazonaws.AmazonClientException;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.AWSCredentials;
import org.apache.commons.lang.StringUtils;
public class BasicAWSCredentialsProvider implements AWSCredentialsProvider {
private final String accessKey;
private final String secretKey;
public BasicAWSCredentialsProvider(String accessKey, String secretKey) {
this.accessKey = accessKey;
this.secretKey = secretKey;
}
public AWSCredentials getCredentials() {
if (!StringUtils.isEmpty(accessKey) && !StringUtils.isEmpty(secretKey)) {
return new BasicAWSCredentials(accessKey, secretKey);
}
throw new AmazonClientException(
"Access key or secret key is null");
}
public void refresh() {}
@Override
public String toString() {
return getClass().getSimpleName();
}
}
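A brief, hedged sketch of how this provider is typically composed into a chain with SDK fallbacks, the same pattern S3AFileSystem.initialize uses further below: static keys first, then EC2 instance-profile credentials, then anonymous access.
// illustrative helper; lives in the same package, so AnonymousAWSCredentialsProvider needs no import
import com.amazonaws.auth.AWSCredentialsProviderChain;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
class CredentialsChainSketch {
  static AWSCredentialsProviderChain build(String accessKey, String secretKey) {
    return new AWSCredentialsProviderChain(
        new BasicAWSCredentialsProvider(accessKey, secretKey),
        new InstanceProfileCredentialsProvider(),
        new AnonymousAWSCredentialsProvider());
  }
}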
| 1,750 | 32.673077 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3a;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.auth.AWSCredentialsProviderChain;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.ListObjectsRequest;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.CopyObjectRequest;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import com.amazonaws.services.s3.transfer.Copy;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerConfiguration;
import com.amazonaws.services.s3.transfer.Upload;
import com.amazonaws.event.ProgressListener;
import com.amazonaws.event.ProgressEvent;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Progressable;
import static org.apache.hadoop.fs.s3a.Constants.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class S3AFileSystem extends FileSystem {
/**
* Default blocksize as used in blocksize and FS status queries
*/
public static final int DEFAULT_BLOCKSIZE = 32 * 1024 * 1024;
private URI uri;
private Path workingDir;
private AmazonS3Client s3;
private String bucket;
private int maxKeys;
private long partSize;
private TransferManager transfers;
private ThreadPoolExecutor threadPoolExecutor;
private long multiPartThreshold;
public static final Logger LOG = LoggerFactory.getLogger(S3AFileSystem.class);
private CannedAccessControlList cannedACL;
private String serverSideEncryptionAlgorithm;
// The maximum number of entries that can be deleted in any call to s3
private static final int MAX_ENTRIES_TO_DELETE = 1000;
private static final AtomicInteger poolNumber = new AtomicInteger(1);
/**
* Returns a {@link java.util.concurrent.ThreadFactory} that names each created thread uniquely,
* with a common prefix.
* @param prefix The prefix of every created Thread's name
* @return a {@link java.util.concurrent.ThreadFactory} that names threads
*/
public static ThreadFactory getNamedThreadFactory(final String prefix) {
SecurityManager s = System.getSecurityManager();
final ThreadGroup threadGroup = (s != null) ? s.getThreadGroup() : Thread.currentThread()
.getThreadGroup();
return new ThreadFactory() {
final AtomicInteger threadNumber = new AtomicInteger(1);
private final int poolNum = poolNumber.getAndIncrement();
final ThreadGroup group = threadGroup;
@Override
public Thread newThread(Runnable r) {
final String name = prefix + "-pool" + poolNum + "-t" + threadNumber.getAndIncrement();
return new Thread(group, r, name);
}
};
}
/**
* Get a named {@link ThreadFactory} that just builds daemon threads.
* @param prefix name prefix for all threads created from the factory
* @return a thread factory that creates named, daemon threads with
* the supplied exception handler and normal priority
*/
private static ThreadFactory newDaemonThreadFactory(final String prefix) {
final ThreadFactory namedFactory = getNamedThreadFactory(prefix);
return new ThreadFactory() {
@Override
public Thread newThread(Runnable r) {
Thread t = namedFactory.newThread(r);
if (!t.isDaemon()) {
t.setDaemon(true);
}
if (t.getPriority() != Thread.NORM_PRIORITY) {
t.setPriority(Thread.NORM_PRIORITY);
}
return t;
}
};
}
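  // Illustrative sketch (editorial addition, hypothetical helper, not part of the original
  // class): how the factories above combine with a bounded work queue. The real shared
  // executor is configured from MAX_THREADS and MAX_TOTAL_TASKS in initialize() below.
  private static ThreadPoolExecutor sketchBoundedDaemonPool(int threads, int queuedTasks) {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(threads, threads,
        60L, TimeUnit.SECONDS,
        new LinkedBlockingQueue<Runnable>(queuedTasks),
        newDaemonThreadFactory("s3a-sketch-"));
    pool.allowCoreThreadTimeOut(true);
    return pool;
  }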
/** Called after a new FileSystem instance is constructed.
* @param name a uri whose authority section names the host, port, etc.
* for this FileSystem
* @param conf the configuration
*/
public void initialize(URI name, Configuration conf) throws IOException {
super.initialize(name, conf);
uri = URI.create(name.getScheme() + "://" + name.getAuthority());
workingDir = new Path("/user", System.getProperty("user.name")).makeQualified(this.uri,
this.getWorkingDirectory());
// Try to get our credentials or just connect anonymously
String accessKey = conf.get(ACCESS_KEY, null);
String secretKey = conf.get(SECRET_KEY, null);
String userInfo = name.getUserInfo();
if (userInfo != null) {
int index = userInfo.indexOf(':');
if (index != -1) {
accessKey = userInfo.substring(0, index);
secretKey = userInfo.substring(index + 1);
} else {
accessKey = userInfo;
}
}
AWSCredentialsProviderChain credentials = new AWSCredentialsProviderChain(
new BasicAWSCredentialsProvider(accessKey, secretKey),
new InstanceProfileCredentialsProvider(),
new AnonymousAWSCredentialsProvider()
);
bucket = name.getHost();
ClientConfiguration awsConf = new ClientConfiguration();
awsConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS,
DEFAULT_MAXIMUM_CONNECTIONS));
boolean secureConnections = conf.getBoolean(SECURE_CONNECTIONS,
DEFAULT_SECURE_CONNECTIONS);
awsConf.setProtocol(secureConnections ? Protocol.HTTPS : Protocol.HTTP);
awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES,
DEFAULT_MAX_ERROR_RETRIES));
awsConf.setConnectionTimeout(conf.getInt(ESTABLISH_TIMEOUT,
DEFAULT_ESTABLISH_TIMEOUT));
awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT,
DEFAULT_SOCKET_TIMEOUT));
String signerOverride = conf.getTrimmed(SIGNING_ALGORITHM, "");
if(!signerOverride.isEmpty()) {
awsConf.setSignerOverride(signerOverride);
}
String proxyHost = conf.getTrimmed(PROXY_HOST, "");
int proxyPort = conf.getInt(PROXY_PORT, -1);
if (!proxyHost.isEmpty()) {
awsConf.setProxyHost(proxyHost);
if (proxyPort >= 0) {
awsConf.setProxyPort(proxyPort);
} else {
if (secureConnections) {
LOG.warn("Proxy host set without port. Using HTTPS default 443");
awsConf.setProxyPort(443);
} else {
LOG.warn("Proxy host set without port. Using HTTP default 80");
awsConf.setProxyPort(80);
}
}
String proxyUsername = conf.getTrimmed(PROXY_USERNAME);
String proxyPassword = conf.getTrimmed(PROXY_PASSWORD);
if ((proxyUsername == null) != (proxyPassword == null)) {
String msg = "Proxy error: " + PROXY_USERNAME + " or " +
PROXY_PASSWORD + " set without the other.";
LOG.error(msg);
throw new IllegalArgumentException(msg);
}
awsConf.setProxyUsername(proxyUsername);
awsConf.setProxyPassword(proxyPassword);
awsConf.setProxyDomain(conf.getTrimmed(PROXY_DOMAIN));
awsConf.setProxyWorkstation(conf.getTrimmed(PROXY_WORKSTATION));
if (LOG.isDebugEnabled()) {
LOG.debug("Using proxy server {}:{} as user {} with password {} on " +
"domain {} as workstation {}", awsConf.getProxyHost(),
awsConf.getProxyPort(), String.valueOf(awsConf.getProxyUsername()),
awsConf.getProxyPassword(), awsConf.getProxyDomain(),
awsConf.getProxyWorkstation());
}
} else if (proxyPort >= 0) {
String msg = "Proxy error: " + PROXY_PORT + " set without " + PROXY_HOST;
LOG.error(msg);
throw new IllegalArgumentException(msg);
}
s3 = new AmazonS3Client(credentials, awsConf);
String endPoint = conf.getTrimmed(ENDPOINT,"");
if (!endPoint.isEmpty()) {
try {
s3.setEndpoint(endPoint);
} catch (IllegalArgumentException e) {
String msg = "Incorrect endpoint: " + e.getMessage();
LOG.error(msg);
throw new IllegalArgumentException(msg, e);
}
}
maxKeys = conf.getInt(MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS);
partSize = conf.getLong(MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
multiPartThreshold = conf.getLong(MIN_MULTIPART_THRESHOLD,
DEFAULT_MIN_MULTIPART_THRESHOLD);
if (partSize < 5 * 1024 * 1024) {
LOG.error(MULTIPART_SIZE + " must be at least 5 MB");
partSize = 5 * 1024 * 1024;
}
if (multiPartThreshold < 5 * 1024 * 1024) {
LOG.error(MIN_MULTIPART_THRESHOLD + " must be at least 5 MB");
multiPartThreshold = 5 * 1024 * 1024;
}
int maxThreads = conf.getInt(MAX_THREADS, DEFAULT_MAX_THREADS);
int coreThreads = conf.getInt(CORE_THREADS, DEFAULT_CORE_THREADS);
if (maxThreads == 0) {
maxThreads = Runtime.getRuntime().availableProcessors() * 8;
}
if (coreThreads == 0) {
coreThreads = Runtime.getRuntime().availableProcessors() * 8;
}
long keepAliveTime = conf.getLong(KEEPALIVE_TIME, DEFAULT_KEEPALIVE_TIME);
LinkedBlockingQueue<Runnable> workQueue =
new LinkedBlockingQueue<>(maxThreads *
conf.getInt(MAX_TOTAL_TASKS, DEFAULT_MAX_TOTAL_TASKS));
threadPoolExecutor = new ThreadPoolExecutor(
coreThreads,
maxThreads,
keepAliveTime,
TimeUnit.SECONDS,
workQueue,
newDaemonThreadFactory("s3a-transfer-shared-"));
threadPoolExecutor.allowCoreThreadTimeOut(true);
TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
transferConfiguration.setMinimumUploadPartSize(partSize);
transferConfiguration.setMultipartUploadThreshold(multiPartThreshold);
transfers = new TransferManager(s3, threadPoolExecutor);
transfers.setConfiguration(transferConfiguration);
String cannedACLName = conf.get(CANNED_ACL, DEFAULT_CANNED_ACL);
if (!cannedACLName.isEmpty()) {
cannedACL = CannedAccessControlList.valueOf(cannedACLName);
} else {
cannedACL = null;
}
if (!s3.doesBucketExist(bucket)) {
throw new IOException("Bucket " + bucket + " does not exist");
}
boolean purgeExistingMultipart = conf.getBoolean(PURGE_EXISTING_MULTIPART,
DEFAULT_PURGE_EXISTING_MULTIPART);
long purgeExistingMultipartAge = conf.getLong(PURGE_EXISTING_MULTIPART_AGE,
DEFAULT_PURGE_EXISTING_MULTIPART_AGE);
if (purgeExistingMultipart) {
Date purgeBefore = new Date(new Date().getTime() - purgeExistingMultipartAge*1000);
transfers.abortMultipartUploads(bucket, purgeBefore);
}
serverSideEncryptionAlgorithm = conf.get(SERVER_SIDE_ENCRYPTION_ALGORITHM);
setConf(conf);
}
/**
* Return the protocol scheme for the FileSystem.
*
* @return "s3a"
*/
public String getScheme() {
return "s3a";
}
/** Returns a URI whose scheme and authority identify this FileSystem.*/
public URI getUri() {
return uri;
}
/**
* Returns the S3 client used by this filesystem.
* @return AmazonS3Client
*/
@VisibleForTesting
AmazonS3Client getAmazonS3Client() {
return s3;
}
public S3AFileSystem() {
super();
}
/* Turns a path (relative or otherwise) into an S3 key
*/
private String pathToKey(Path path) {
if (!path.isAbsolute()) {
path = new Path(workingDir, path);
}
if (path.toUri().getScheme() != null && path.toUri().getPath().isEmpty()) {
return "";
}
return path.toUri().getPath().substring(1);
}
private Path keyToPath(String key) {
return new Path("/" + key);
}
/**
* Opens an FSDataInputStream at the indicated Path.
* @param f the file name to open
* @param bufferSize the size of the buffer to be used.
*/
public FSDataInputStream open(Path f, int bufferSize)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Opening '{}' for reading.", f);
}
final FileStatus fileStatus = getFileStatus(f);
if (fileStatus.isDirectory()) {
throw new FileNotFoundException("Can't open " + f + " because it is a directory");
}
return new FSDataInputStream(new S3AInputStream(bucket, pathToKey(f),
fileStatus.getLen(), s3, statistics));
}
/**
* Create an FSDataOutputStream at the indicated Path with write-progress
* reporting.
* @param f the file name to open
   * @param permission the permission to set (not used by this implementation)
* @param overwrite if a file with this name already exists, then if true,
* the file will be overwritten, and if false an error will be thrown.
* @param bufferSize the size of the buffer to be used.
* @param replication required block replication for the file.
   * @param blockSize requested block size (not used by this implementation)
   * @param progress callback for reporting progress
* @throws IOException
* @see #setPermission(Path, FsPermission)
*/
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite,
int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
String key = pathToKey(f);
if (!overwrite && exists(f)) {
throw new FileAlreadyExistsException(f + " already exists");
}
if (getConf().getBoolean(FAST_UPLOAD, DEFAULT_FAST_UPLOAD)) {
return new FSDataOutputStream(new S3AFastOutputStream(s3, this, bucket,
key, progress, statistics, cannedACL,
serverSideEncryptionAlgorithm, partSize, multiPartThreshold,
threadPoolExecutor), statistics);
}
// We pass null to FSDataOutputStream so it won't count writes that are being buffered to a file
return new FSDataOutputStream(new S3AOutputStream(getConf(), transfers, this,
bucket, key, progress, cannedACL, statistics,
serverSideEncryptionAlgorithm), null);
}
/**
* Append to an existing file (optional operation).
* @param f the existing file to be appended.
* @param bufferSize the size of the buffer to be used.
* @param progress for reporting progress if it is not null.
* @throws IOException
*/
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
throw new IOException("Not supported");
}
/**
* Renames Path src to Path dst. Can take place on local fs
* or remote DFS.
*
* Warning: S3 does not support renames. This method does a copy which can
* take S3 some time to execute with large files and directories. Since
* there is no Progressable passed in, this can time out jobs.
*
   * Note: This implementation differs from other S3 drivers. Specifically:
* Fails if src is a file and dst is a directory.
* Fails if src is a directory and dst is a file.
* Fails if the parent of dst does not exist or is a file.
* Fails if dst is a directory that is not empty.
*
* @param src path to be renamed
* @param dst new path after rename
* @throws IOException on failure
* @return true if rename is successful
*/
public boolean rename(Path src, Path dst) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Rename path {} to {}", src, dst);
}
String srcKey = pathToKey(src);
String dstKey = pathToKey(dst);
if (srcKey.isEmpty() || dstKey.isEmpty()) {
if (LOG.isDebugEnabled()) {
LOG.debug("rename: src or dst are empty");
}
return false;
}
S3AFileStatus srcStatus;
try {
srcStatus = getFileStatus(src);
} catch (FileNotFoundException e) {
LOG.error("rename: src not found {}", src);
return false;
}
if (srcKey.equals(dstKey)) {
if (LOG.isDebugEnabled()) {
LOG.debug("rename: src and dst refer to the same file or directory");
}
return srcStatus.isFile();
}
S3AFileStatus dstStatus = null;
try {
dstStatus = getFileStatus(dst);
if (srcStatus.isDirectory() && dstStatus.isFile()) {
if (LOG.isDebugEnabled()) {
LOG.debug("rename: src is a directory and dst is a file");
}
return false;
}
if (dstStatus.isDirectory() && !dstStatus.isEmptyDirectory()) {
return false;
}
} catch (FileNotFoundException e) {
// Parent must exist
Path parent = dst.getParent();
if (!pathToKey(parent).isEmpty()) {
try {
S3AFileStatus dstParentStatus = getFileStatus(dst.getParent());
if (!dstParentStatus.isDirectory()) {
return false;
}
} catch (FileNotFoundException e2) {
return false;
}
}
}
// Ok! Time to start
if (srcStatus.isFile()) {
if (LOG.isDebugEnabled()) {
LOG.debug("rename: renaming file " + src + " to " + dst);
}
if (dstStatus != null && dstStatus.isDirectory()) {
String newDstKey = dstKey;
if (!newDstKey.endsWith("/")) {
newDstKey = newDstKey + "/";
}
String filename =
srcKey.substring(pathToKey(src.getParent()).length()+1);
newDstKey = newDstKey + filename;
copyFile(srcKey, newDstKey);
} else {
copyFile(srcKey, dstKey);
}
delete(src, false);
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("rename: renaming directory " + src + " to " + dst);
}
// This is a directory to directory copy
if (!dstKey.endsWith("/")) {
dstKey = dstKey + "/";
}
if (!srcKey.endsWith("/")) {
srcKey = srcKey + "/";
}
//Verify dest is not a child of the source directory
if (dstKey.startsWith(srcKey)) {
if (LOG.isDebugEnabled()) {
LOG.debug("cannot rename a directory to a subdirectory of self");
}
return false;
}
List<DeleteObjectsRequest.KeyVersion> keysToDelete =
new ArrayList<>();
if (dstStatus != null && dstStatus.isEmptyDirectory()) {
// delete unnecessary fake directory.
keysToDelete.add(new DeleteObjectsRequest.KeyVersion(dstKey));
}
ListObjectsRequest request = new ListObjectsRequest();
request.setBucketName(bucket);
request.setPrefix(srcKey);
request.setMaxKeys(maxKeys);
ObjectListing objects = s3.listObjects(request);
statistics.incrementReadOps(1);
while (true) {
for (S3ObjectSummary summary : objects.getObjectSummaries()) {
keysToDelete.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
String newDstKey = dstKey + summary.getKey().substring(srcKey.length());
copyFile(summary.getKey(), newDstKey);
if (keysToDelete.size() == MAX_ENTRIES_TO_DELETE) {
DeleteObjectsRequest deleteRequest =
new DeleteObjectsRequest(bucket).withKeys(keysToDelete);
s3.deleteObjects(deleteRequest);
statistics.incrementWriteOps(1);
keysToDelete.clear();
}
}
if (objects.isTruncated()) {
objects = s3.listNextBatchOfObjects(objects);
statistics.incrementReadOps(1);
} else {
if (keysToDelete.size() > 0) {
DeleteObjectsRequest deleteRequest =
new DeleteObjectsRequest(bucket).withKeys(keysToDelete);
s3.deleteObjects(deleteRequest);
statistics.incrementWriteOps(1);
}
break;
}
}
}
if (src.getParent() != dst.getParent()) {
deleteUnnecessaryFakeDirectories(dst.getParent());
createFakeDirectoryIfNecessary(src.getParent());
}
return true;
}
/** Delete a file.
*
* @param f the path to delete.
   * @param recursive if the path is a directory and set to true, the
   * directory is deleted; otherwise an exception is thrown. For a file,
   * recursive may be either true or false.
* @return true if delete is successful else false.
* @throws IOException
*/
public boolean delete(Path f, boolean recursive) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Delete path " + f + " - recursive " + recursive);
}
S3AFileStatus status;
try {
status = getFileStatus(f);
} catch (FileNotFoundException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Couldn't delete " + f + " - does not exist");
}
return false;
}
String key = pathToKey(f);
if (status.isDirectory()) {
if (LOG.isDebugEnabled()) {
LOG.debug("delete: Path is a directory");
}
if (!recursive && !status.isEmptyDirectory()) {
throw new IOException("Path is a folder: " + f +
" and it is not an empty directory");
}
if (!key.endsWith("/")) {
key = key + "/";
}
if (key.equals("/")) {
LOG.info("s3a cannot delete the root directory");
return false;
}
if (status.isEmptyDirectory()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Deleting fake empty directory");
}
s3.deleteObject(bucket, key);
statistics.incrementWriteOps(1);
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Getting objects for directory prefix " + key + " to delete");
}
ListObjectsRequest request = new ListObjectsRequest();
request.setBucketName(bucket);
request.setPrefix(key);
        // No delimiter is set, so the listing returns every key under the prefix
        // (a recursive listing), which is what a recursive delete needs.
//request.setDelimiter("/");
request.setMaxKeys(maxKeys);
List<DeleteObjectsRequest.KeyVersion> keys =
new ArrayList<>();
ObjectListing objects = s3.listObjects(request);
statistics.incrementReadOps(1);
while (true) {
for (S3ObjectSummary summary : objects.getObjectSummaries()) {
keys.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
if (LOG.isDebugEnabled()) {
LOG.debug("Got object to delete " + summary.getKey());
}
if (keys.size() == MAX_ENTRIES_TO_DELETE) {
DeleteObjectsRequest deleteRequest =
new DeleteObjectsRequest(bucket).withKeys(keys);
s3.deleteObjects(deleteRequest);
statistics.incrementWriteOps(1);
keys.clear();
}
}
if (objects.isTruncated()) {
objects = s3.listNextBatchOfObjects(objects);
statistics.incrementReadOps(1);
} else {
if (!keys.isEmpty()) {
DeleteObjectsRequest deleteRequest =
new DeleteObjectsRequest(bucket).withKeys(keys);
s3.deleteObjects(deleteRequest);
statistics.incrementWriteOps(1);
}
break;
}
}
}
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("delete: Path is a file");
}
s3.deleteObject(bucket, key);
statistics.incrementWriteOps(1);
}
createFakeDirectoryIfNecessary(f.getParent());
return true;
}
private void createFakeDirectoryIfNecessary(Path f) throws IOException {
String key = pathToKey(f);
if (!key.isEmpty() && !exists(f)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Creating new fake directory at " + f);
}
createFakeDirectory(bucket, key);
}
}
/**
* List the statuses of the files/directories in the given path if the path is
* a directory.
*
* @param f given path
   * @return the statuses of the files/directories in the given path
* @throws FileNotFoundException when the path does not exist;
* IOException see specific implementation
*/
public FileStatus[] listStatus(Path f) throws FileNotFoundException,
IOException {
String key = pathToKey(f);
if (LOG.isDebugEnabled()) {
LOG.debug("List status for path: " + f);
}
final List<FileStatus> result = new ArrayList<FileStatus>();
final FileStatus fileStatus = getFileStatus(f);
if (fileStatus.isDirectory()) {
if (!key.isEmpty()) {
key = key + "/";
}
ListObjectsRequest request = new ListObjectsRequest();
request.setBucketName(bucket);
request.setPrefix(key);
request.setDelimiter("/");
request.setMaxKeys(maxKeys);
if (LOG.isDebugEnabled()) {
LOG.debug("listStatus: doing listObjects for directory " + key);
}
ObjectListing objects = s3.listObjects(request);
statistics.incrementReadOps(1);
while (true) {
for (S3ObjectSummary summary : objects.getObjectSummaries()) {
Path keyPath = keyToPath(summary.getKey()).makeQualified(uri, workingDir);
// Skip over keys that are ourselves and old S3N _$folder$ files
if (keyPath.equals(f) || summary.getKey().endsWith(S3N_FOLDER_SUFFIX)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Ignoring: " + keyPath);
}
continue;
}
if (objectRepresentsDirectory(summary.getKey(), summary.getSize())) {
result.add(new S3AFileStatus(true, true, keyPath));
if (LOG.isDebugEnabled()) {
LOG.debug("Adding: fd: " + keyPath);
}
} else {
result.add(new S3AFileStatus(summary.getSize(),
dateToLong(summary.getLastModified()), keyPath,
getDefaultBlockSize(f.makeQualified(uri, workingDir))));
if (LOG.isDebugEnabled()) {
LOG.debug("Adding: fi: " + keyPath);
}
}
}
for (String prefix : objects.getCommonPrefixes()) {
Path keyPath = keyToPath(prefix).makeQualified(uri, workingDir);
if (keyPath.equals(f)) {
continue;
}
result.add(new S3AFileStatus(true, false, keyPath));
if (LOG.isDebugEnabled()) {
LOG.debug("Adding: rd: " + keyPath);
}
}
if (objects.isTruncated()) {
if (LOG.isDebugEnabled()) {
LOG.debug("listStatus: list truncated - getting next batch");
}
objects = s3.listNextBatchOfObjects(objects);
statistics.incrementReadOps(1);
} else {
break;
}
}
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Adding: rd (not a dir): " + f);
}
result.add(fileStatus);
}
return result.toArray(new FileStatus[result.size()]);
}
/**
* Set the current working directory for the given file system. All relative
* paths will be resolved relative to it.
*
   * @param new_dir the new working directory
*/
public void setWorkingDirectory(Path new_dir) {
workingDir = new_dir;
}
/**
* Get the current working directory for the given file system
* @return the directory pathname
*/
public Path getWorkingDirectory() {
return workingDir;
}
/**
* Make the given file and all non-existent parents into
* directories. Has the semantics of Unix 'mkdir -p'.
* Existence of the directory hierarchy is not an error.
* @param f path to create
* @param permission to apply to f
*/
// TODO: If we have created an empty file at /foo/bar and we then call
// mkdirs for /foo/bar/baz/roo what happens to the empty file /foo/bar/?
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Making directory: " + f);
}
try {
FileStatus fileStatus = getFileStatus(f);
if (fileStatus.isDirectory()) {
return true;
} else {
throw new FileAlreadyExistsException("Path is a file: " + f);
}
} catch (FileNotFoundException e) {
Path fPart = f;
do {
try {
FileStatus fileStatus = getFileStatus(fPart);
if (fileStatus.isFile()) {
throw new FileAlreadyExistsException(String.format(
"Can't make directory for path '%s' since it is a file.",
fPart));
}
} catch (FileNotFoundException fnfe) {
}
fPart = fPart.getParent();
} while (fPart != null);
String key = pathToKey(f);
createFakeDirectory(bucket, key);
return true;
}
}
/**
* Return a file status object that represents the path.
* @param f The path we want information from
* @return a FileStatus object
* @throws java.io.FileNotFoundException when the path does not exist;
* IOException see specific implementation
*/
public S3AFileStatus getFileStatus(Path f) throws IOException {
String key = pathToKey(f);
if (LOG.isDebugEnabled()) {
LOG.debug("Getting path status for " + f + " (" + key + ")");
}
if (!key.isEmpty()) {
try {
ObjectMetadata meta = s3.getObjectMetadata(bucket, key);
statistics.incrementReadOps(1);
if (objectRepresentsDirectory(key, meta.getContentLength())) {
if (LOG.isDebugEnabled()) {
LOG.debug("Found exact file: fake directory");
}
return new S3AFileStatus(true, true,
f.makeQualified(uri, workingDir));
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Found exact file: normal file");
}
return new S3AFileStatus(meta.getContentLength(),
dateToLong(meta.getLastModified()),
f.makeQualified(uri, workingDir),
getDefaultBlockSize(f.makeQualified(uri, workingDir)));
}
} catch (AmazonServiceException e) {
if (e.getStatusCode() != 404) {
printAmazonServiceException(e);
throw e;
}
} catch (AmazonClientException e) {
printAmazonClientException(e);
throw e;
}
    // Fall back to probing for a directory marker: the same key with a trailing '/'
if (!key.endsWith("/")) {
try {
String newKey = key + "/";
ObjectMetadata meta = s3.getObjectMetadata(bucket, newKey);
statistics.incrementReadOps(1);
if (objectRepresentsDirectory(newKey, meta.getContentLength())) {
if (LOG.isDebugEnabled()) {
LOG.debug("Found file (with /): fake directory");
}
return new S3AFileStatus(true, true, f.makeQualified(uri, workingDir));
} else {
LOG.warn("Found file (with /): real file? should not happen: {}", key);
return new S3AFileStatus(meta.getContentLength(),
dateToLong(meta.getLastModified()),
f.makeQualified(uri, workingDir),
getDefaultBlockSize(f.makeQualified(uri, workingDir)));
}
} catch (AmazonServiceException e) {
if (e.getStatusCode() != 404) {
printAmazonServiceException(e);
throw e;
}
} catch (AmazonClientException e) {
printAmazonClientException(e);
throw e;
}
}
}
try {
if (!key.isEmpty() && !key.endsWith("/")) {
key = key + "/";
}
ListObjectsRequest request = new ListObjectsRequest();
request.setBucketName(bucket);
request.setPrefix(key);
request.setDelimiter("/");
request.setMaxKeys(1);
ObjectListing objects = s3.listObjects(request);
statistics.incrementReadOps(1);
if (!objects.getCommonPrefixes().isEmpty()
|| objects.getObjectSummaries().size() > 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Found path as directory (with /): " +
objects.getCommonPrefixes().size() + "/" +
objects.getObjectSummaries().size());
for (S3ObjectSummary summary : objects.getObjectSummaries()) {
LOG.debug("Summary: " + summary.getKey() + " " + summary.getSize());
}
for (String prefix : objects.getCommonPrefixes()) {
LOG.debug("Prefix: " + prefix);
}
}
return new S3AFileStatus(true, false,
f.makeQualified(uri, workingDir));
}
} catch (AmazonServiceException e) {
if (e.getStatusCode() != 404) {
printAmazonServiceException(e);
throw e;
}
} catch (AmazonClientException e) {
printAmazonClientException(e);
throw e;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Not Found: " + f);
}
throw new FileNotFoundException("No such file or directory: " + f);
}
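  // Illustrative sketch (editorial addition, hypothetical helper, not part of the original
  // class): the probe order above means a plain existence check can cost up to three S3
  // round trips (exact key, "key/" directory marker, then a one-key listing).
  private boolean sketchExists(Path f) throws IOException {
    try {
      getFileStatus(f);
      return true;
    } catch (FileNotFoundException e) {
      return false;
    }
  }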
/**
* The src file is on the local disk. Add it to FS at
* the given dst name.
*
* This version doesn't need to create a temporary file to calculate the md5.
* Sadly this doesn't seem to be used by the shell cp :(
*
* delSrc indicates if the source should be removed
* @param delSrc whether to delete the src
* @param overwrite whether to overwrite an existing file
* @param src path
* @param dst path
*/
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src,
Path dst) throws IOException {
String key = pathToKey(dst);
if (!overwrite && exists(dst)) {
throw new IOException(dst + " already exists");
}
if (LOG.isDebugEnabled()) {
LOG.debug("Copying local file from " + src + " to " + dst);
}
// Since we have a local file, we don't need to stream into a temporary file
LocalFileSystem local = getLocal(getConf());
File srcfile = local.pathToFile(src);
final ObjectMetadata om = new ObjectMetadata();
if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
om.setSSEAlgorithm(serverSideEncryptionAlgorithm);
}
PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
putObjectRequest.setCannedAcl(cannedACL);
putObjectRequest.setMetadata(om);
ProgressListener progressListener = new ProgressListener() {
public void progressChanged(ProgressEvent progressEvent) {
switch (progressEvent.getEventType()) {
case TRANSFER_PART_COMPLETED_EVENT:
statistics.incrementWriteOps(1);
break;
default:
break;
}
}
};
Upload up = transfers.upload(putObjectRequest);
up.addProgressListener(progressListener);
try {
up.waitForUploadResult();
statistics.incrementWriteOps(1);
} catch (InterruptedException e) {
throw new IOException("Got interrupted, cancelling");
}
// This will delete unnecessary fake parent directories
finishedWrite(key);
if (delSrc) {
local.delete(src, false);
}
}
@Override
public void close() throws IOException {
try {
super.close();
} finally {
if (transfers != null) {
transfers.shutdownNow(true);
transfers = null;
}
}
}
/**
   * Override getCanonicalServiceName because we don't support delegation tokens in S3A
*/
@Override
public String getCanonicalServiceName() {
// Does not support Token
return null;
}
private void copyFile(String srcKey, String dstKey) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("copyFile " + srcKey + " -> " + dstKey);
}
ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
final ObjectMetadata dstom = srcom.clone();
if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
dstom.setSSEAlgorithm(serverSideEncryptionAlgorithm);
}
CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
copyObjectRequest.setCannedAccessControlList(cannedACL);
copyObjectRequest.setNewObjectMetadata(dstom);
ProgressListener progressListener = new ProgressListener() {
public void progressChanged(ProgressEvent progressEvent) {
switch (progressEvent.getEventType()) {
case TRANSFER_PART_COMPLETED_EVENT:
statistics.incrementWriteOps(1);
break;
default:
break;
}
}
};
Copy copy = transfers.copy(copyObjectRequest);
copy.addProgressListener(progressListener);
try {
copy.waitForCopyResult();
statistics.incrementWriteOps(1);
} catch (InterruptedException e) {
throw new IOException("Got interrupted, cancelling");
}
}
private boolean objectRepresentsDirectory(final String name, final long size) {
return !name.isEmpty() && name.charAt(name.length() - 1) == '/' && size == 0L;
}
// Handles null Dates that can be returned by AWS
private static long dateToLong(final Date date) {
if (date == null) {
return 0L;
}
return date.getTime();
}
public void finishedWrite(String key) throws IOException {
deleteUnnecessaryFakeDirectories(keyToPath(key).getParent());
}
private void deleteUnnecessaryFakeDirectories(Path f) throws IOException {
while (true) {
try {
String key = pathToKey(f);
if (key.isEmpty()) {
break;
}
S3AFileStatus status = getFileStatus(f);
if (status.isDirectory() && status.isEmptyDirectory()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Deleting fake directory " + key + "/");
}
s3.deleteObject(bucket, key + "/");
statistics.incrementWriteOps(1);
}
} catch (FileNotFoundException | AmazonServiceException e) {
}
if (f.isRoot()) {
break;
}
f = f.getParent();
}
}
private void createFakeDirectory(final String bucketName, final String objectName)
throws AmazonClientException, AmazonServiceException {
if (!objectName.endsWith("/")) {
createEmptyObject(bucketName, objectName + "/");
} else {
createEmptyObject(bucketName, objectName);
}
}
// Used to create an empty file that represents an empty directory
private void createEmptyObject(final String bucketName, final String objectName)
throws AmazonClientException, AmazonServiceException {
final InputStream im = new InputStream() {
@Override
public int read() throws IOException {
return -1;
}
};
final ObjectMetadata om = new ObjectMetadata();
om.setContentLength(0L);
if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
om.setSSEAlgorithm(serverSideEncryptionAlgorithm);
}
PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, objectName, im, om);
putObjectRequest.setCannedAcl(cannedACL);
s3.putObject(putObjectRequest);
statistics.incrementWriteOps(1);
}
/**
   * Return the number of bytes that large input files should optimally
   * be split into to minimize I/O time.
* @deprecated use {@link #getDefaultBlockSize(Path)} instead
*/
@Deprecated
public long getDefaultBlockSize() {
// default to 32MB: large enough to minimize the impact of seeks
return getConf().getLong(FS_S3A_BLOCK_SIZE, DEFAULT_BLOCKSIZE);
}
private void printAmazonServiceException(AmazonServiceException ase) {
LOG.info("Caught an AmazonServiceException, which means your request made it " +
"to Amazon S3, but was rejected with an error response for some reason.");
LOG.info("Error Message: " + ase.getMessage());
LOG.info("HTTP Status Code: " + ase.getStatusCode());
LOG.info("AWS Error Code: " + ase.getErrorCode());
LOG.info("Error Type: " + ase.getErrorType());
LOG.info("Request ID: " + ase.getRequestId());
LOG.info("Class Name: " + ase.getClass().getName());
}
private void printAmazonClientException(AmazonClientException ace) {
LOG.info("Caught an AmazonClientException, which means the client encountered " +
"a serious internal problem while trying to communicate with S3, " +
"such as not being able to access the network.");
LOG.info("Error Message: {}" + ace, ace);
}
}
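// Illustrative usage sketch (editorial addition, not part of the Hadoop source): one way the
// filesystem above can be exercised through the generic FileSystem API. The bucket name,
// paths and credential values are hypothetical placeholders; credentials may equally come
// from instance profiles or anonymous access, as resolved in initialize().
class S3AFileSystemUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(ACCESS_KEY, "access-key-placeholder");   // keys come from the Constants static import
    conf.set(SECRET_KEY, "secret-key-placeholder");
    FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
    Path dir = new Path("/demo");
    fs.mkdirs(dir);                                   // stored as an empty "demo/" marker object
    FSDataOutputStream out = fs.create(new Path(dir, "hello.txt"), true);
    try {
      out.write("hello".getBytes("UTF-8"));
    } finally {
      out.close();                                    // the actual upload happens on close()
    }
    for (FileStatus status : fs.listStatus(dir)) {    // paged listObjects under the "demo/" prefix
      System.out.println(status.getPath() + " " + status.getLen());
    }
    fs.rename(new Path(dir, "hello.txt"), new Path(dir, "renamed.txt"));  // copy + delete underneath
    fs.delete(dir, true);                             // recursive bulk delete
    fs.close();
  }
}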
| 41,383 | 32.618197 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3a;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
public class S3AFileStatus extends FileStatus {
private boolean isEmptyDirectory;
// Directories
public S3AFileStatus(boolean isdir, boolean isemptydir, Path path) {
super(0, isdir, 1, 0, 0, path);
isEmptyDirectory = isemptydir;
}
// Files
public S3AFileStatus(long length, long modification_time, Path path,
long blockSize) {
super(length, false, 1, blockSize, modification_time, path);
isEmptyDirectory = false;
}
public boolean isEmptyDirectory() {
return isEmptyDirectory;
}
  /** Compare this object with another object.
   * @param o the object to be compared.
   * @return true if the two file statuses have the same path name; false if not.
*/
@Override
public boolean equals(Object o) {
return super.equals(o);
}
/**
* Returns a hash code value for the object, which is defined as
* the hash code of the path name.
*
* @return a hash code value for the path name.
*/
@Override
public int hashCode() {
return super.hashCode();
}
}
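// Illustrative sketch (editorial addition, not part of the Hadoop source): how the two
// constructors above are meant to be used. The paths are hypothetical placeholders.
class S3AFileStatusSketch {
  static void demo() {
    S3AFileStatus emptyDir =
        new S3AFileStatus(true, true, new Path("s3a://example-bucket/dir"));
    S3AFileStatus file = new S3AFileStatus(1024L, System.currentTimeMillis(),
        new Path("s3a://example-bucket/dir/part-0000"), 32 * 1024 * 1024);
    System.out.println(emptyDir.isDirectory() + " " + emptyDir.isEmptyDirectory()); // true true
    System.out.println(file.isFile() + " " + file.isEmptyDirectory());              // true false
  }
}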
| 1,956 | 29.578125 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3a;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AnonymousAWSCredentials;
import com.amazonaws.auth.AWSCredentials;
public class AnonymousAWSCredentialsProvider implements AWSCredentialsProvider {
public AWSCredentials getCredentials() {
return new AnonymousAWSCredentials();
}
public void refresh() {}
@Override
public String toString() {
return getClass().getSimpleName();
}
}
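// Illustrative sketch (editorial addition, not part of the Hadoop source): this provider is
// intended to sit last in a credentials chain, so anonymous access is used only when nothing
// else resolves, mirroring the chain built in S3AFileSystem.initialize().
class AnonymousCredentialsChainSketch {
  static com.amazonaws.auth.AWSCredentialsProviderChain chain() {
    return new com.amazonaws.auth.AWSCredentialsProviderChain(
        new com.amazonaws.auth.InstanceProfileCredentialsProvider(),
        new AnonymousAWSCredentialsProvider());
  }
}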
| 1,273 | 32.526316 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3a;
import com.amazonaws.event.ProgressEvent;
import com.amazonaws.event.ProgressEventType;
import com.amazonaws.event.ProgressListener;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerConfiguration;
import com.amazonaws.services.s3.transfer.Upload;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.util.Progressable;
import org.slf4j.Logger;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import static com.amazonaws.event.ProgressEventType.TRANSFER_COMPLETED_EVENT;
import static com.amazonaws.event.ProgressEventType.TRANSFER_PART_STARTED_EVENT;
import static org.apache.hadoop.fs.s3a.Constants.*;
public class S3AOutputStream extends OutputStream {
private OutputStream backupStream;
private File backupFile;
private boolean closed;
private String key;
private String bucket;
private TransferManager transfers;
private Progressable progress;
private long partSize;
private long partSizeThreshold;
private S3AFileSystem fs;
private CannedAccessControlList cannedACL;
private FileSystem.Statistics statistics;
private LocalDirAllocator lDirAlloc;
private String serverSideEncryptionAlgorithm;
public static final Logger LOG = S3AFileSystem.LOG;
public S3AOutputStream(Configuration conf, TransferManager transfers,
S3AFileSystem fs, String bucket, String key, Progressable progress,
CannedAccessControlList cannedACL, FileSystem.Statistics statistics,
String serverSideEncryptionAlgorithm)
throws IOException {
this.bucket = bucket;
this.key = key;
this.transfers = transfers;
this.progress = progress;
this.fs = fs;
this.cannedACL = cannedACL;
this.statistics = statistics;
this.serverSideEncryptionAlgorithm = serverSideEncryptionAlgorithm;
partSize = conf.getLong(MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
partSizeThreshold = conf.getLong(MIN_MULTIPART_THRESHOLD,
DEFAULT_MIN_MULTIPART_THRESHOLD);
if (conf.get(BUFFER_DIR, null) != null) {
lDirAlloc = new LocalDirAllocator(BUFFER_DIR);
} else {
lDirAlloc = new LocalDirAllocator("${hadoop.tmp.dir}/s3a");
}
backupFile = lDirAlloc.createTmpFileForWrite("output-", LocalDirAllocator.SIZE_UNKNOWN, conf);
closed = false;
if (LOG.isDebugEnabled()) {
LOG.debug("OutputStream for key '" + key + "' writing to tempfile: " +
this.backupFile);
}
this.backupStream = new BufferedOutputStream(new FileOutputStream(backupFile));
}
@Override
public void flush() throws IOException {
backupStream.flush();
}
@Override
public synchronized void close() throws IOException {
if (closed) {
return;
}
backupStream.close();
if (LOG.isDebugEnabled()) {
LOG.debug("OutputStream for key '" + key + "' closed. Now beginning upload");
LOG.debug("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold);
}
try {
final ObjectMetadata om = new ObjectMetadata();
if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
om.setSSEAlgorithm(serverSideEncryptionAlgorithm);
}
PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
putObjectRequest.setCannedAcl(cannedACL);
putObjectRequest.setMetadata(om);
Upload upload = transfers.upload(putObjectRequest);
ProgressableProgressListener listener =
new ProgressableProgressListener(upload, progress, statistics);
upload.addProgressListener(listener);
upload.waitForUploadResult();
long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
if (statistics != null && delta != 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("S3A write delta changed after finished: " + delta + " bytes");
}
statistics.incrementBytesWritten(delta);
}
// This will delete unnecessary fake parent directories
fs.finishedWrite(key);
} catch (InterruptedException e) {
throw new IOException(e);
} finally {
if (!backupFile.delete()) {
LOG.warn("Could not delete temporary s3a file: {}", backupFile);
}
super.close();
closed = true;
}
if (LOG.isDebugEnabled()) {
LOG.debug("OutputStream for key '" + key + "' upload complete");
}
}
@Override
public void write(int b) throws IOException {
backupStream.write(b);
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
backupStream.write(b, off, len);
}
public static class ProgressableProgressListener implements ProgressListener {
private Progressable progress;
private FileSystem.Statistics statistics;
private long lastBytesTransferred;
private Upload upload;
public ProgressableProgressListener(Upload upload, Progressable progress,
FileSystem.Statistics statistics) {
this.upload = upload;
this.progress = progress;
this.statistics = statistics;
this.lastBytesTransferred = 0;
}
public void progressChanged(ProgressEvent progressEvent) {
if (progress != null) {
progress.progress();
}
// There are 3 http ops here, but this should be close enough for now
ProgressEventType pet = progressEvent.getEventType();
if (pet == TRANSFER_PART_STARTED_EVENT ||
pet == TRANSFER_COMPLETED_EVENT) {
statistics.incrementWriteOps(1);
}
long transferred = upload.getProgress().getBytesTransferred();
long delta = transferred - lastBytesTransferred;
if (statistics != null && delta != 0) {
statistics.incrementBytesWritten(delta);
}
lastBytesTransferred = transferred;
}
public long getLastBytesTransferred() {
return lastBytesTransferred;
}
}
}
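// Illustrative sketch (editorial addition, not part of the Hadoop source): the nested
// ProgressableProgressListener above works with any TransferManager transfer, forwarding
// progress to a Progressable and recording byte/op statistics. The request passed in is a
// hypothetical placeholder built by the caller.
class ProgressableUploadSketch {
  static void uploadWithProgress(TransferManager transfers, PutObjectRequest request,
      Progressable progress, FileSystem.Statistics statistics) throws InterruptedException {
    Upload upload = transfers.upload(request);
    S3AOutputStream.ProgressableProgressListener listener =
        new S3AOutputStream.ProgressableProgressListener(upload, progress, statistics);
    upload.addProgressListener(listener);
    upload.waitForUploadResult();   // blocks until the transfer finishes
  }
}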
| 7,184 | 33.052133 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3a;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.S3ObjectInputStream;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.slf4j.Logger;
import java.io.EOFException;
import java.io.IOException;
import java.net.SocketTimeoutException;
import java.net.SocketException;
public class S3AInputStream extends FSInputStream {
private long pos;
private boolean closed;
private S3ObjectInputStream wrappedStream;
private FileSystem.Statistics stats;
private AmazonS3Client client;
private String bucket;
private String key;
private long contentLength;
public static final Logger LOG = S3AFileSystem.LOG;
public static final long CLOSE_THRESHOLD = 4096;
public S3AInputStream(String bucket, String key, long contentLength, AmazonS3Client client,
FileSystem.Statistics stats) {
this.bucket = bucket;
this.key = key;
this.contentLength = contentLength;
this.client = client;
this.stats = stats;
this.pos = 0;
this.closed = false;
this.wrappedStream = null;
}
private void openIfNeeded() throws IOException {
if (wrappedStream == null) {
reopen(0);
}
}
private synchronized void reopen(long pos) throws IOException {
if (wrappedStream != null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Aborting old stream to open at pos " + pos);
}
wrappedStream.abort();
}
if (pos < 0) {
throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK
+" " + pos);
}
if (contentLength > 0 && pos > contentLength-1) {
throw new EOFException(
FSExceptionMessages.CANNOT_SEEK_PAST_EOF
+ " " + pos);
}
LOG.debug("Actually opening file " + key + " at pos " + pos);
GetObjectRequest request = new GetObjectRequest(bucket, key);
request.setRange(pos, contentLength-1);
wrappedStream = client.getObject(request).getObjectContent();
if (wrappedStream == null) {
throw new IOException("Null IO stream");
}
this.pos = pos;
}
@Override
public synchronized long getPos() throws IOException {
return pos;
}
@Override
public synchronized void seek(long pos) throws IOException {
checkNotClosed();
if (this.pos == pos) {
return;
}
LOG.debug(
"Reopening " + this.key + " to seek to new offset " + (pos - this.pos));
reopen(pos);
}
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
return false;
}
@Override
public synchronized int read() throws IOException {
checkNotClosed();
openIfNeeded();
int byteRead;
try {
byteRead = wrappedStream.read();
} catch (SocketTimeoutException e) {
LOG.info("Got timeout while trying to read from stream, trying to recover " + e);
reopen(pos);
byteRead = wrappedStream.read();
} catch (SocketException e) {
LOG.info("Got socket exception while trying to read from stream, trying to recover " + e);
reopen(pos);
byteRead = wrappedStream.read();
}
if (byteRead >= 0) {
pos++;
}
if (stats != null && byteRead >= 0) {
stats.incrementBytesRead(1);
}
return byteRead;
}
@Override
public synchronized int read(byte[] buf, int off, int len) throws IOException {
checkNotClosed();
openIfNeeded();
int byteRead;
try {
byteRead = wrappedStream.read(buf, off, len);
} catch (SocketTimeoutException e) {
LOG.info("Got timeout while trying to read from stream, trying to recover " + e);
reopen(pos);
byteRead = wrappedStream.read(buf, off, len);
} catch (SocketException e) {
LOG.info("Got socket exception while trying to read from stream, trying to recover " + e);
reopen(pos);
byteRead = wrappedStream.read(buf, off, len);
}
if (byteRead > 0) {
pos += byteRead;
}
if (stats != null && byteRead > 0) {
stats.incrementBytesRead(byteRead);
}
return byteRead;
}
private void checkNotClosed() throws IOException {
if (closed) {
throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
}
}
@Override
public synchronized void close() throws IOException {
super.close();
closed = true;
if (wrappedStream != null) {
if (contentLength - pos <= CLOSE_THRESHOLD) {
// Close, rather than abort, so that the http connection can be reused.
wrappedStream.close();
} else {
// Abort, rather than just close, the underlying stream. Otherwise, the
// remaining object payload is read from S3 while closing the stream.
wrappedStream.abort();
}
}
}
@Override
public synchronized int available() throws IOException {
checkNotClosed();
long remaining = this.contentLength - this.pos;
if (remaining > Integer.MAX_VALUE) {
return Integer.MAX_VALUE;
}
return (int)remaining;
}
@Override
public boolean markSupported() {
return false;
}
}
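// Illustrative sketch (editorial addition, not part of the Hadoop source): the stream above
// is normally obtained through S3AFileSystem.open(); the path argument here is a hypothetical
// placeholder supplied by the caller.
class S3AInputStreamUsageSketch {
  static byte[] readHeader(FileSystem fs, org.apache.hadoop.fs.Path path) throws IOException {
    byte[] header = new byte[16];
    org.apache.hadoop.fs.FSDataInputStream in = fs.open(path, 4096);
    try {
      in.readFully(header);   // the first read lazily opens a ranged GET from offset 0
    } finally {
      in.close();             // close() vs abort() of the wrapped stream depends on CLOSE_THRESHOLD
    }
    return header;
  }
}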
| 6,072 | 26.604545 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3a;
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.event.ProgressEvent;
import com.amazonaws.event.ProgressListener;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;
import com.amazonaws.services.s3.model.UploadPartRequest;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.util.Progressable;
import org.slf4j.Logger;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
/**
 * Upload files/parts directly from a memory buffer as soon as possible
 * (instead of buffering to a file).
* <p>
* Uploads are managed low-level rather than through the AWS TransferManager.
* This allows for uploading each part of a multi-part upload as soon as
* the bytes are in memory, rather than waiting until the file is closed.
* <p>
* Unstable: statistics and error handling might evolve
*/
@InterfaceStability.Unstable
public class S3AFastOutputStream extends OutputStream {
private static final Logger LOG = S3AFileSystem.LOG;
private final String key;
private final String bucket;
private final AmazonS3Client client;
private final int partSize;
private final int multiPartThreshold;
private final S3AFileSystem fs;
private final CannedAccessControlList cannedACL;
private final FileSystem.Statistics statistics;
private final String serverSideEncryptionAlgorithm;
private final ProgressListener progressListener;
private final ListeningExecutorService executorService;
private MultiPartUpload multiPartUpload;
private boolean closed;
private ByteArrayOutputStream buffer;
private int bufferLimit;
/**
* Creates a fast OutputStream that uploads to S3 from memory.
* For MultiPartUploads, as soon as sufficient bytes have been written to
* the stream a part is uploaded immediately (by using the low-level
* multi-part upload API on the AmazonS3Client).
*
* @param client AmazonS3Client used for S3 calls
* @param fs S3AFilesystem
* @param bucket S3 bucket name
* @param key S3 key name
* @param progress report progress in order to prevent timeouts
* @param statistics track FileSystem.Statistics on the performed operations
* @param cannedACL used CannedAccessControlList
* @param serverSideEncryptionAlgorithm algorithm for server side encryption
* @param partSize size of a single part in a multi-part upload (except
* last part)
* @param multiPartThreshold files at least this size use multi-part upload
* @throws IOException
*/
public S3AFastOutputStream(AmazonS3Client client, S3AFileSystem fs,
String bucket, String key, Progressable progress,
FileSystem.Statistics statistics, CannedAccessControlList cannedACL,
String serverSideEncryptionAlgorithm, long partSize,
long multiPartThreshold, ThreadPoolExecutor threadPoolExecutor)
throws IOException {
this.bucket = bucket;
this.key = key;
this.client = client;
this.fs = fs;
this.cannedACL = cannedACL;
this.statistics = statistics;
this.serverSideEncryptionAlgorithm = serverSideEncryptionAlgorithm;
//Ensure limit as ByteArrayOutputStream size cannot exceed Integer.MAX_VALUE
if (partSize > Integer.MAX_VALUE) {
this.partSize = Integer.MAX_VALUE;
LOG.warn("s3a: MULTIPART_SIZE capped to ~2.14GB (maximum allowed size " +
"when using 'FAST_UPLOAD = true')");
} else {
this.partSize = (int) partSize;
}
if (multiPartThreshold > Integer.MAX_VALUE) {
this.multiPartThreshold = Integer.MAX_VALUE;
LOG.warn("s3a: MIN_MULTIPART_THRESHOLD capped to ~2.14GB (maximum " +
"allowed size when using 'FAST_UPLOAD = true')");
} else {
this.multiPartThreshold = (int) multiPartThreshold;
}
this.bufferLimit = this.multiPartThreshold;
this.closed = false;
int initialBufferSize = this.fs.getConf()
.getInt(Constants.FAST_BUFFER_SIZE, Constants.DEFAULT_FAST_BUFFER_SIZE);
if (initialBufferSize < 0) {
LOG.warn("s3a: FAST_BUFFER_SIZE should be a positive number. Using " +
"default value");
initialBufferSize = Constants.DEFAULT_FAST_BUFFER_SIZE;
} else if (initialBufferSize > this.bufferLimit) {
LOG.warn("s3a: automatically adjusting FAST_BUFFER_SIZE to not " +
"exceed MIN_MULTIPART_THRESHOLD");
initialBufferSize = this.bufferLimit;
}
this.buffer = new ByteArrayOutputStream(initialBufferSize);
this.executorService = MoreExecutors.listeningDecorator(threadPoolExecutor);
this.multiPartUpload = null;
this.progressListener = new ProgressableListener(progress);
if (LOG.isDebugEnabled()){
LOG.debug("Initialized S3AFastOutputStream for bucket '{}' key '{}'",
bucket, key);
}
}
/**
* Writes a byte to the memory buffer. If this causes the buffer to reach
* its limit, the actual upload is submitted to the threadpool.
* @param b the int of which the lowest byte is written
* @throws IOException
*/
@Override
public synchronized void write(int b) throws IOException {
buffer.write(b);
if (buffer.size() == bufferLimit) {
uploadBuffer();
}
}
/**
   * Writes a range of bytes to the memory buffer. If this causes the
* buffer to reach its limit, the actual upload is submitted to the
* threadpool and the remainder of the array is written to memory
* (recursively).
   * @param b byte array containing the data to write
* @param off offset in array where to start
* @param len number of bytes to be written
* @throws IOException
*/
@Override
public synchronized void write(byte b[], int off, int len)
throws IOException {
if (b == null) {
throw new NullPointerException();
} else if ((off < 0) || (off > b.length) || (len < 0) ||
((off + len) > b.length) || ((off + len) < 0)) {
throw new IndexOutOfBoundsException();
} else if (len == 0) {
return;
}
if (buffer.size() + len < bufferLimit) {
buffer.write(b, off, len);
} else {
int firstPart = bufferLimit - buffer.size();
buffer.write(b, off, firstPart);
uploadBuffer();
this.write(b, off + firstPart, len - firstPart);
}
}
private synchronized void uploadBuffer() throws IOException {
if (multiPartUpload == null) {
multiPartUpload = initiateMultiPartUpload();
/* Upload the existing buffer if it exceeds partSize. This possibly
requires multiple parts! */
final byte[] allBytes = buffer.toByteArray();
      buffer = null; // drop the reference so the old buffer can be garbage collected sooner
if (LOG.isDebugEnabled()) {
LOG.debug("Total length of initial buffer: {}", allBytes.length);
}
int processedPos = 0;
while ((multiPartThreshold - processedPos) >= partSize) {
if (LOG.isDebugEnabled()) {
LOG.debug("Initial buffer: processing from byte {} to byte {}",
processedPos, (processedPos + partSize - 1));
}
multiPartUpload.uploadPartAsync(new ByteArrayInputStream(allBytes,
processedPos, partSize), partSize);
processedPos += partSize;
}
//resize and reset stream
bufferLimit = partSize;
buffer = new ByteArrayOutputStream(bufferLimit);
buffer.write(allBytes, processedPos, multiPartThreshold - processedPos);
} else {
//upload next part
multiPartUpload.uploadPartAsync(new ByteArrayInputStream(buffer
.toByteArray()), partSize);
buffer.reset();
}
}
@Override
public synchronized void close() throws IOException {
if (closed) {
return;
}
closed = true;
try {
if (multiPartUpload == null) {
putObject();
} else {
if (buffer.size() > 0) {
//send last part
multiPartUpload.uploadPartAsync(new ByteArrayInputStream(buffer
.toByteArray()), buffer.size());
}
final List<PartETag> partETags = multiPartUpload
.waitForAllPartUploads();
multiPartUpload.complete(partETags);
}
statistics.incrementWriteOps(1);
// This will delete unnecessary fake parent directories
fs.finishedWrite(key);
if (LOG.isDebugEnabled()) {
LOG.debug("Upload complete for bucket '{}' key '{}'", bucket, key);
}
} finally {
buffer = null;
super.close();
}
}
private ObjectMetadata createDefaultMetadata() {
ObjectMetadata om = new ObjectMetadata();
if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
om.setSSEAlgorithm(serverSideEncryptionAlgorithm);
}
return om;
}
private MultiPartUpload initiateMultiPartUpload() throws IOException {
final ObjectMetadata om = createDefaultMetadata();
final InitiateMultipartUploadRequest initiateMPURequest =
new InitiateMultipartUploadRequest(bucket, key, om);
initiateMPURequest.setCannedACL(cannedACL);
try {
return new MultiPartUpload(
client.initiateMultipartUpload(initiateMPURequest).getUploadId());
} catch (AmazonServiceException ase) {
throw new IOException("Unable to initiate MultiPartUpload (server side)" +
": " + ase, ase);
} catch (AmazonClientException ace) {
throw new IOException("Unable to initiate MultiPartUpload (client side)" +
": " + ace, ace);
}
}
private void putObject() throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Executing regular upload for bucket '{}' key '{}'", bucket,
key);
}
final ObjectMetadata om = createDefaultMetadata();
om.setContentLength(buffer.size());
final PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key,
new ByteArrayInputStream(buffer.toByteArray()), om);
putObjectRequest.setCannedAcl(cannedACL);
putObjectRequest.setGeneralProgressListener(progressListener);
ListenableFuture<PutObjectResult> putObjectResult =
executorService.submit(new Callable<PutObjectResult>() {
@Override
public PutObjectResult call() throws Exception {
return client.putObject(putObjectRequest);
}
});
//wait for completion
try {
putObjectResult.get();
} catch (InterruptedException ie) {
LOG.warn("Interrupted object upload:" + ie, ie);
Thread.currentThread().interrupt();
} catch (ExecutionException ee) {
throw new IOException("Regular upload failed", ee.getCause());
}
}
private class MultiPartUpload {
private final String uploadId;
private final List<ListenableFuture<PartETag>> partETagsFutures;
public MultiPartUpload(String uploadId) {
this.uploadId = uploadId;
this.partETagsFutures = new ArrayList<ListenableFuture<PartETag>>();
if (LOG.isDebugEnabled()) {
LOG.debug("Initiated multi-part upload for bucket '{}' key '{}' with " +
"id '{}'", bucket, key, uploadId);
}
}
public void uploadPartAsync(ByteArrayInputStream inputStream,
int partSize) {
final int currentPartNumber = partETagsFutures.size() + 1;
final UploadPartRequest request =
new UploadPartRequest().withBucketName(bucket).withKey(key)
.withUploadId(uploadId).withInputStream(inputStream)
.withPartNumber(currentPartNumber).withPartSize(partSize);
request.setGeneralProgressListener(progressListener);
ListenableFuture<PartETag> partETagFuture =
executorService.submit(new Callable<PartETag>() {
@Override
public PartETag call() throws Exception {
if (LOG.isDebugEnabled()) {
LOG.debug("Uploading part {} for id '{}'", currentPartNumber,
uploadId);
}
return client.uploadPart(request).getPartETag();
}
});
partETagsFutures.add(partETagFuture);
}
public List<PartETag> waitForAllPartUploads() throws IOException {
try {
return Futures.allAsList(partETagsFutures).get();
} catch (InterruptedException ie) {
LOG.warn("Interrupted partUpload:" + ie, ie);
Thread.currentThread().interrupt();
} catch (ExecutionException ee) {
//there is no way of recovering so abort
//cancel all partUploads
for (ListenableFuture<PartETag> future : partETagsFutures) {
future.cancel(true);
}
//abort multipartupload
this.abort();
throw new IOException("Part upload failed in multi-part upload with " +
"id '" +uploadId + "':" + ee, ee);
}
      // reached only if the wait above was interrupted; no ETags are available then
return null;
}
public void complete(List<PartETag> partETags) {
if (LOG.isDebugEnabled()) {
LOG.debug("Completing multi-part upload for key '{}', id '{}'", key,
uploadId);
}
final CompleteMultipartUploadRequest completeRequest =
new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags);
client.completeMultipartUpload(completeRequest);
}
public void abort() {
LOG.warn("Aborting multi-part upload with id '{}'", uploadId);
try {
client.abortMultipartUpload(new AbortMultipartUploadRequest(bucket,
key, uploadId));
} catch (Exception e2) {
LOG.warn("Unable to abort multipart upload, you may need to purge " +
"uploaded parts: " + e2, e2);
}
}
}
private static class ProgressableListener implements ProgressListener {
private final Progressable progress;
public ProgressableListener(Progressable progress) {
this.progress = progress;
}
public void progressChanged(ProgressEvent progressEvent) {
if (progress != null) {
progress.progress();
}
}
}
}
| 15,677 | 36.869565 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Exception.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown if there is a problem communicating with Amazon S3.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class S3Exception extends IOException {
private static final long serialVersionUID = 1L;
public S3Exception(Throwable t) {
super(t);
}
}
| 1,271 | 30.8 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Credentials.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/**
* <p>
* Extracts AWS credentials from the filesystem URI or configuration.
* </p>
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class S3Credentials {
private String accessKey;
private String secretAccessKey;
/**
* @throws IllegalArgumentException if credentials for S3 cannot be
* determined.
* @throws IOException if credential providers are misconfigured and we have
* to talk to them.
*/
public void initialize(URI uri, Configuration conf) throws IOException {
if (uri.getHost() == null) {
throw new IllegalArgumentException("Invalid hostname in URI " + uri);
}
String userInfo = uri.getUserInfo();
if (userInfo != null) {
int index = userInfo.indexOf(':');
if (index != -1) {
accessKey = userInfo.substring(0, index);
secretAccessKey = userInfo.substring(index + 1);
} else {
accessKey = userInfo;
}
}
String scheme = uri.getScheme();
String accessKeyProperty = String.format("fs.%s.awsAccessKeyId", scheme);
String secretAccessKeyProperty =
String.format("fs.%s.awsSecretAccessKey", scheme);
if (accessKey == null) {
accessKey = conf.getTrimmed(accessKeyProperty);
}
if (secretAccessKey == null) {
final char[] pass = conf.getPassword(secretAccessKeyProperty);
if (pass != null) {
secretAccessKey = (new String(pass)).trim();
}
}
if (accessKey == null && secretAccessKey == null) {
throw new IllegalArgumentException("AWS " +
"Access Key ID and Secret Access " +
"Key must be specified as the " +
"username or password " +
"(respectively) of a " + scheme +
" URL, or by setting the " +
accessKeyProperty + " or " +
secretAccessKeyProperty +
" properties (respectively).");
} else if (accessKey == null) {
throw new IllegalArgumentException("AWS " +
"Access Key ID must be specified " +
"as the username of a " + scheme +
" URL, or by setting the " +
accessKeyProperty + " property.");
} else if (secretAccessKey == null) {
throw new IllegalArgumentException("AWS " +
"Secret Access Key must be " +
"specified as the password of a " +
scheme + " URL, or by setting the " +
secretAccessKeyProperty +
" property.");
}
}
public String getAccessKey() {
return accessKey;
}
public String getSecretAccessKey() {
return secretAccessKey;
}
}
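/*
 * A minimal usage sketch (not part of the original class). The access key and
 * secret below are placeholders; because the URI uses the "s3" scheme, the
 * property names resolve to fs.s3.awsAccessKeyId and fs.s3.awsSecretAccessKey
 * as shown in initialize() above (IOExceptions omitted):
 *
 *   Configuration conf = new Configuration();
 *   conf.set("fs.s3.awsAccessKeyId", "EXAMPLE_ACCESS_KEY");
 *   conf.set("fs.s3.awsSecretAccessKey", "EXAMPLE_SECRET_KEY");
 *   S3Credentials credentials = new S3Credentials();
 *   credentials.initialize(URI.create("s3://mybucket/"), conf);
 *   String accessKey = credentials.getAccessKey();
 *   String secretKey = credentials.getSecretAccessKey();
 */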
| 4,184 | 37.045455 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/INode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.IOUtils;
/**
 * Holds file metadata including the type (regular file or directory)
 * and the list of blocks that point to the data.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class INode {
enum FileType {
DIRECTORY, FILE
}
public static final FileType[] FILE_TYPES = {
FileType.DIRECTORY,
FileType.FILE
};
public static final INode DIRECTORY_INODE = new INode(FileType.DIRECTORY, null);
private FileType fileType;
private Block[] blocks;
public INode(FileType fileType, Block[] blocks) {
this.fileType = fileType;
if (isDirectory() && blocks != null) {
throw new IllegalArgumentException("A directory cannot contain blocks.");
}
this.blocks = blocks;
}
public Block[] getBlocks() {
return blocks;
}
public FileType getFileType() {
return fileType;
}
public boolean isDirectory() {
return fileType == FileType.DIRECTORY;
}
public boolean isFile() {
return fileType == FileType.FILE;
}
public long getSerializedLength() {
return 1L + (blocks == null ? 0 : 4 + blocks.length * 16);
}
public InputStream serialize() throws IOException {
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
DataOutputStream out = new DataOutputStream(bytes);
try {
out.writeByte(fileType.ordinal());
if (isFile()) {
out.writeInt(blocks.length);
for (int i = 0; i < blocks.length; i++) {
out.writeLong(blocks[i].getId());
out.writeLong(blocks[i].getLength());
}
}
out.close();
out = null;
} finally {
IOUtils.closeStream(out);
}
return new ByteArrayInputStream(bytes.toByteArray());
}
public static INode deserialize(InputStream in) throws IOException {
if (in == null) {
return null;
}
DataInputStream dataIn = new DataInputStream(in);
FileType fileType = INode.FILE_TYPES[dataIn.readByte()];
switch (fileType) {
case DIRECTORY:
in.close();
return INode.DIRECTORY_INODE;
case FILE:
int numBlocks = dataIn.readInt();
Block[] blocks = new Block[numBlocks];
for (int i = 0; i < numBlocks; i++) {
long id = dataIn.readLong();
long length = dataIn.readLong();
blocks[i] = new Block(id, length);
}
in.close();
return new INode(fileType, blocks);
default:
throw new IllegalArgumentException("Cannot deserialize inode.");
}
}
}
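/*
 * A minimal round-trip sketch (not part of the original class). The block ids
 * and lengths are hypothetical; it only illustrates that serialize() and
 * deserialize() are inverses for a FILE inode (IOExceptions omitted):
 *
 *   INode original = new INode(INode.FileType.FILE,
 *       new Block[] { new Block(1L, 64L), new Block(2L, 32L) });
 *   INode copy = INode.deserialize(original.serialize());
 *   // copy.isFile() == true, copy.getBlocks().length == 2
 */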
| 3,683 | 27.55814 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystemConfigKeys.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
/**
* This class contains constants for configuration keys used
* in the s3 file system.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class S3FileSystemConfigKeys extends CommonConfigurationKeys {
public static final String S3_BLOCK_SIZE_KEY = "s3.blocksize";
public static final long S3_BLOCK_SIZE_DEFAULT = 64*1024*1024;
public static final String S3_REPLICATION_KEY = "s3.replication";
public static final short S3_REPLICATION_DEFAULT = 1;
public static final String S3_STREAM_BUFFER_SIZE_KEY =
"s3.stream-buffer-size";
public static final int S3_STREAM_BUFFER_SIZE_DEFAULT = 4096;
public static final String S3_BYTES_PER_CHECKSUM_KEY =
"s3.bytes-per-checksum";
public static final int S3_BYTES_PER_CHECKSUM_DEFAULT = 512;
public static final String S3_CLIENT_WRITE_PACKET_SIZE_KEY =
"s3.client-write-packet-size";
public static final int S3_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
}
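/*
 * A small sketch (not part of the original class) of how these keys are read
 * back from a Configuration, falling back to the defaults declared above:
 *
 *   Configuration conf = new Configuration();
 *   long blockSize = conf.getLong(S3FileSystemConfigKeys.S3_BLOCK_SIZE_KEY,
 *       S3FileSystemConfigKeys.S3_BLOCK_SIZE_DEFAULT);
 *   int bufferSize = conf.getInt(S3FileSystemConfigKeys.S3_STREAM_BUFFER_SIZE_KEY,
 *       S3FileSystemConfigKeys.S3_STREAM_BUFFER_SIZE_DEFAULT);
 */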
| 2,138 | 43.5625 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.Closeable;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3.INode.FileType;
import org.jets3t.service.S3Service;
import org.jets3t.service.S3ServiceException;
import org.jets3t.service.ServiceException;
import org.jets3t.service.impl.rest.httpclient.RestS3Service;
import org.jets3t.service.model.S3Bucket;
import org.jets3t.service.model.S3Object;
import org.jets3t.service.security.AWSCredentials;
@InterfaceAudience.Private
@InterfaceStability.Unstable
class Jets3tFileSystemStore implements FileSystemStore {
private static final String FILE_SYSTEM_NAME = "fs";
private static final String FILE_SYSTEM_VALUE = "Hadoop";
private static final String FILE_SYSTEM_TYPE_NAME = "fs-type";
private static final String FILE_SYSTEM_TYPE_VALUE = "block";
private static final String FILE_SYSTEM_VERSION_NAME = "fs-version";
private static final String FILE_SYSTEM_VERSION_VALUE = "1";
private static final Map<String, Object> METADATA =
new HashMap<String, Object>();
static {
METADATA.put(FILE_SYSTEM_NAME, FILE_SYSTEM_VALUE);
METADATA.put(FILE_SYSTEM_TYPE_NAME, FILE_SYSTEM_TYPE_VALUE);
METADATA.put(FILE_SYSTEM_VERSION_NAME, FILE_SYSTEM_VERSION_VALUE);
}
private static final String PATH_DELIMITER = Path.SEPARATOR;
private static final String BLOCK_PREFIX = "block_";
private Configuration conf;
private S3Service s3Service;
private S3Bucket bucket;
private int bufferSize;
private static final Log LOG =
LogFactory.getLog(Jets3tFileSystemStore.class.getName());
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
this.conf = conf;
S3Credentials s3Credentials = new S3Credentials();
s3Credentials.initialize(uri, conf);
try {
AWSCredentials awsCredentials =
new AWSCredentials(s3Credentials.getAccessKey(),
s3Credentials.getSecretAccessKey());
this.s3Service = new RestS3Service(awsCredentials);
} catch (S3ServiceException e) {
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
}
throw new S3Exception(e);
}
bucket = new S3Bucket(uri.getHost());
this.bufferSize = conf.getInt(
S3FileSystemConfigKeys.S3_STREAM_BUFFER_SIZE_KEY,
S3FileSystemConfigKeys.S3_STREAM_BUFFER_SIZE_DEFAULT
);
}
@Override
public String getVersion() throws IOException {
return FILE_SYSTEM_VERSION_VALUE;
}
private void delete(String key) throws IOException {
try {
s3Service.deleteObject(bucket, key);
} catch (S3ServiceException e) {
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
}
throw new S3Exception(e);
}
}
@Override
public void deleteINode(Path path) throws IOException {
delete(pathToKey(path));
}
@Override
public void deleteBlock(Block block) throws IOException {
delete(blockToKey(block));
}
@Override
public boolean inodeExists(Path path) throws IOException {
InputStream in = get(pathToKey(path), true);
if (in == null) {
return false;
}
in.close();
return true;
}
@Override
public boolean blockExists(long blockId) throws IOException {
InputStream in = get(blockToKey(blockId), false);
if (in == null) {
return false;
}
in.close();
return true;
}
private InputStream get(String key, boolean checkMetadata)
throws IOException {
try {
S3Object object = s3Service.getObject(bucket.getName(), key);
if (checkMetadata) {
checkMetadata(object);
}
return object.getDataInputStream();
} catch (S3ServiceException e) {
if ("NoSuchKey".equals(e.getS3ErrorCode())) {
throw new IOException(key + " doesn't exist");
}
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
}
throw new S3Exception(e);
} catch (ServiceException e) {
handleServiceException(e);
return null;
}
}
private InputStream get(String key, long byteRangeStart) throws IOException {
try {
S3Object object = s3Service.getObject(bucket, key, null, null, null,
null, byteRangeStart, null);
return object.getDataInputStream();
} catch (S3ServiceException e) {
if ("NoSuchKey".equals(e.getS3ErrorCode())) {
return null;
}
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
}
throw new S3Exception(e);
} catch (ServiceException e) {
handleServiceException(e);
return null;
}
}
private void checkMetadata(S3Object object) throws S3FileSystemException,
S3ServiceException {
String name = (String) object.getMetadata(FILE_SYSTEM_NAME);
if (!FILE_SYSTEM_VALUE.equals(name)) {
throw new S3FileSystemException("Not a Hadoop S3 file.");
}
String type = (String) object.getMetadata(FILE_SYSTEM_TYPE_NAME);
if (!FILE_SYSTEM_TYPE_VALUE.equals(type)) {
throw new S3FileSystemException("Not a block file.");
}
String dataVersion = (String) object.getMetadata(FILE_SYSTEM_VERSION_NAME);
if (!FILE_SYSTEM_VERSION_VALUE.equals(dataVersion)) {
throw new VersionMismatchException(FILE_SYSTEM_VERSION_VALUE,
dataVersion);
}
}
@Override
public INode retrieveINode(Path path) throws IOException {
return INode.deserialize(get(pathToKey(path), true));
}
@Override
public File retrieveBlock(Block block, long byteRangeStart)
throws IOException {
File fileBlock = null;
InputStream in = null;
OutputStream out = null;
try {
fileBlock = newBackupFile();
in = get(blockToKey(block), byteRangeStart);
out = new BufferedOutputStream(new FileOutputStream(fileBlock));
byte[] buf = new byte[bufferSize];
int numRead;
while ((numRead = in.read(buf)) >= 0) {
out.write(buf, 0, numRead);
}
return fileBlock;
} catch (IOException e) {
// close output stream to file then delete file
closeQuietly(out);
out = null; // to prevent a second close
if (fileBlock != null) {
boolean b = fileBlock.delete();
if (!b) {
LOG.warn("Ignoring failed delete");
}
}
throw e;
} finally {
closeQuietly(out);
closeQuietly(in);
}
}
private File newBackupFile() throws IOException {
File dir = new File(conf.get("fs.s3.buffer.dir"));
if (!dir.exists() && !dir.mkdirs()) {
throw new IOException("Cannot create S3 buffer directory: " + dir);
}
File result = File.createTempFile("input-", ".tmp", dir);
result.deleteOnExit();
return result;
}
@Override
public Set<Path> listSubPaths(Path path) throws IOException {
try {
String prefix = pathToKey(path);
if (!prefix.endsWith(PATH_DELIMITER)) {
prefix += PATH_DELIMITER;
}
S3Object[] objects = s3Service.listObjects(bucket.getName(), prefix, PATH_DELIMITER);
Set<Path> prefixes = new TreeSet<Path>();
for (int i = 0; i < objects.length; i++) {
prefixes.add(keyToPath(objects[i].getKey()));
}
prefixes.remove(path);
return prefixes;
} catch (S3ServiceException e) {
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
}
throw new S3Exception(e);
}
}
@Override
public Set<Path> listDeepSubPaths(Path path) throws IOException {
try {
String prefix = pathToKey(path);
if (!prefix.endsWith(PATH_DELIMITER)) {
prefix += PATH_DELIMITER;
}
S3Object[] objects = s3Service.listObjects(bucket.getName(), prefix, null);
Set<Path> prefixes = new TreeSet<Path>();
for (int i = 0; i < objects.length; i++) {
prefixes.add(keyToPath(objects[i].getKey()));
}
prefixes.remove(path);
return prefixes;
} catch (S3ServiceException e) {
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
}
throw new S3Exception(e);
}
}
private void put(String key, InputStream in, long length, boolean storeMetadata)
throws IOException {
try {
S3Object object = new S3Object(key);
object.setDataInputStream(in);
object.setContentType("binary/octet-stream");
object.setContentLength(length);
if (storeMetadata) {
object.addAllMetadata(METADATA);
}
s3Service.putObject(bucket, object);
} catch (S3ServiceException e) {
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
}
throw new S3Exception(e);
}
}
@Override
public void storeINode(Path path, INode inode) throws IOException {
put(pathToKey(path), inode.serialize(), inode.getSerializedLength(), true);
}
@Override
public void storeBlock(Block block, File file) throws IOException {
BufferedInputStream in = null;
try {
in = new BufferedInputStream(new FileInputStream(file));
put(blockToKey(block), in, block.getLength(), false);
} finally {
closeQuietly(in);
}
}
private void closeQuietly(Closeable closeable) {
if (closeable != null) {
try {
closeable.close();
} catch (IOException e) {
// ignore
}
}
}
private String pathToKey(Path path) {
if (!path.isAbsolute()) {
throw new IllegalArgumentException("Path must be absolute: " + path);
}
return path.toUri().getPath();
}
private Path keyToPath(String key) {
return new Path(key);
}
private String blockToKey(long blockId) {
return BLOCK_PREFIX + blockId;
}
private String blockToKey(Block block) {
return blockToKey(block.getId());
}
@Override
public void purge() throws IOException {
try {
S3Object[] objects = s3Service.listObjects(bucket.getName());
for (int i = 0; i < objects.length; i++) {
s3Service.deleteObject(bucket, objects[i].getKey());
}
} catch (S3ServiceException e) {
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
}
throw new S3Exception(e);
}
}
@Override
public void dump() throws IOException {
StringBuilder sb = new StringBuilder("S3 Filesystem, ");
sb.append(bucket.getName()).append("\n");
try {
S3Object[] objects = s3Service.listObjects(bucket.getName(), PATH_DELIMITER, null);
for (int i = 0; i < objects.length; i++) {
Path path = keyToPath(objects[i].getKey());
sb.append(path).append("\n");
INode m = retrieveINode(path);
sb.append("\t").append(m.getFileType()).append("\n");
if (m.getFileType() == FileType.DIRECTORY) {
continue;
}
for (int j = 0; j < m.getBlocks().length; j++) {
sb.append("\t").append(m.getBlocks()[j]).append("\n");
}
}
} catch (S3ServiceException e) {
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
}
throw new S3Exception(e);
}
System.out.println(sb);
}
private void handleServiceException(ServiceException e) throws IOException {
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
}
else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got ServiceException with Error code: " + e.getErrorCode()
            + "; and Error message: " + e.getErrorMessage());
}
}
}
}
| 13,121 | 29.516279 | 128 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/FileSystemStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
/**
* A facility for storing and retrieving {@link INode}s and {@link Block}s.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public interface FileSystemStore {
void initialize(URI uri, Configuration conf) throws IOException;
String getVersion() throws IOException;
void storeINode(Path path, INode inode) throws IOException;
void storeBlock(Block block, File file) throws IOException;
boolean inodeExists(Path path) throws IOException;
boolean blockExists(long blockId) throws IOException;
INode retrieveINode(Path path) throws IOException;
File retrieveBlock(Block block, long byteRangeStart) throws IOException;
void deleteINode(Path path) throws IOException;
void deleteBlock(Block block) throws IOException;
Set<Path> listSubPaths(Path path) throws IOException;
Set<Path> listDeepSubPaths(Path path) throws IOException;
/**
* Delete everything. Used for testing.
* @throws IOException
*/
void purge() throws IOException;
/**
* Diagnostic method to dump all INodes to the console.
* @throws IOException
*/
void dump() throws IOException;
}
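/*
 * A minimal usage sketch (not part of the original interface). It assumes
 * Jets3tFileSystemStore as the implementation, a placeholder bucket name, and
 * credentials already present in the Configuration (IOExceptions omitted):
 *
 *   FileSystemStore store = new Jets3tFileSystemStore();
 *   store.initialize(URI.create("s3://mybucket"), conf);
 *   store.storeINode(new Path("/dir"), INode.DIRECTORY_INODE);
 *   INode inode = store.retrieveINode(new Path("/dir"));
 *   // inode.isDirectory() == true
 */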
| 2,256 | 32.191176 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystemException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown when there is a fatal exception while using {@link S3FileSystem}.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class S3FileSystemException extends IOException {
private static final long serialVersionUID = 1L;
public S3FileSystemException(String message) {
super(message);
}
}
| 1,311 | 34.459459 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/VersionMismatchException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown when Hadoop cannot read the version of the data stored
* in {@link S3FileSystem}.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class VersionMismatchException extends S3FileSystemException {
private static final long serialVersionUID = 1L;
public VersionMismatchException(String clientVersion, String dataVersion) {
super("Version mismatch: client expects version " + clientVersion +
", but data has version " +
(dataVersion == null ? "[unversioned]" : dataVersion));
}
}
| 1,493 | 38.315789 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3OutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3.INode.FileType;
import org.apache.hadoop.util.Progressable;
@InterfaceAudience.Private
@InterfaceStability.Unstable
class S3OutputStream extends OutputStream {
private Configuration conf;
private int bufferSize;
private FileSystemStore store;
private Path path;
private long blockSize;
private File backupFile;
private OutputStream backupStream;
private Random r = new Random();
private boolean closed;
private int pos = 0;
private long filePos = 0;
private int bytesWrittenToBlock = 0;
private byte[] outBuf;
private List<Block> blocks = new ArrayList<Block>();
private Block nextBlock;
private static final Log LOG =
LogFactory.getLog(S3OutputStream.class.getName());
public S3OutputStream(Configuration conf, FileSystemStore store,
Path path, long blockSize, Progressable progress,
int buffersize) throws IOException {
this.conf = conf;
this.store = store;
this.path = path;
this.blockSize = blockSize;
this.backupFile = newBackupFile();
this.backupStream = new FileOutputStream(backupFile);
this.bufferSize = buffersize;
this.outBuf = new byte[bufferSize];
}
private File newBackupFile() throws IOException {
File dir = new File(conf.get("fs.s3.buffer.dir"));
if (!dir.exists() && !dir.mkdirs()) {
throw new IOException("Cannot create S3 buffer directory: " + dir);
}
File result = File.createTempFile("output-", ".tmp", dir);
result.deleteOnExit();
return result;
}
public long getPos() throws IOException {
return filePos;
}
@Override
public synchronized void write(int b) throws IOException {
if (closed) {
throw new IOException("Stream closed");
}
if ((bytesWrittenToBlock + pos == blockSize) || (pos >= bufferSize)) {
flush();
}
outBuf[pos++] = (byte) b;
filePos++;
}
@Override
public synchronized void write(byte b[], int off, int len) throws IOException {
if (closed) {
throw new IOException("Stream closed");
}
while (len > 0) {
int remaining = bufferSize - pos;
int toWrite = Math.min(remaining, len);
System.arraycopy(b, off, outBuf, pos, toWrite);
pos += toWrite;
off += toWrite;
len -= toWrite;
filePos += toWrite;
if ((bytesWrittenToBlock + pos >= blockSize) || (pos == bufferSize)) {
flush();
}
}
}
@Override
public synchronized void flush() throws IOException {
if (closed) {
throw new IOException("Stream closed");
}
if (bytesWrittenToBlock + pos >= blockSize) {
flushData((int) blockSize - bytesWrittenToBlock);
}
if (bytesWrittenToBlock == blockSize) {
endBlock();
}
flushData(pos);
}
private synchronized void flushData(int maxPos) throws IOException {
int workingPos = Math.min(pos, maxPos);
if (workingPos > 0) {
//
// To the local block backup, write just the bytes
//
backupStream.write(outBuf, 0, workingPos);
//
// Track position
//
bytesWrittenToBlock += workingPos;
System.arraycopy(outBuf, workingPos, outBuf, 0, pos - workingPos);
pos -= workingPos;
}
}
private synchronized void endBlock() throws IOException {
//
// Done with local copy
//
backupStream.close();
//
// Send it to S3
//
// TODO: Use passed in Progressable to report progress.
nextBlockOutputStream();
store.storeBlock(nextBlock, backupFile);
internalClose();
//
// Delete local backup, start new one
//
boolean b = backupFile.delete();
if (!b) {
LOG.warn("Ignoring failed delete");
}
backupFile = newBackupFile();
backupStream = new FileOutputStream(backupFile);
bytesWrittenToBlock = 0;
}
private synchronized void nextBlockOutputStream() throws IOException {
long blockId = r.nextLong();
while (store.blockExists(blockId)) {
blockId = r.nextLong();
}
nextBlock = new Block(blockId, bytesWrittenToBlock);
blocks.add(nextBlock);
bytesWrittenToBlock = 0;
}
private synchronized void internalClose() throws IOException {
INode inode = new INode(FileType.FILE, blocks.toArray(new Block[blocks
.size()]));
store.storeINode(path, inode);
}
@Override
public synchronized void close() throws IOException {
if (closed) {
return;
}
flush();
if (filePos == 0 || bytesWrittenToBlock != 0) {
endBlock();
}
backupStream.close();
boolean b = backupFile.delete();
if (!b) {
LOG.warn("Ignoring failed delete");
}
super.close();
closed = true;
}
}
| 6,121 | 24.940678 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.util.Set;
import java.util.TreeSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.jets3t.service.S3Service;
import org.jets3t.service.S3ServiceException;
import org.jets3t.service.ServiceException;
import org.jets3t.service.impl.rest.httpclient.RestS3Service;
import org.jets3t.service.model.S3Bucket;
import org.jets3t.service.model.S3Object;
import org.jets3t.service.security.AWSCredentials;
/**
* <p>
* This class is a tool for migrating data from an older to a newer version
* of an S3 filesystem.
* </p>
* <p>
* All files in the filesystem are migrated by re-writing the block metadata
* - no datafiles are touched.
* </p>
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public class MigrationTool extends Configured implements Tool {
private S3Service s3Service;
private S3Bucket bucket;
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new MigrationTool(), args);
System.exit(res);
}
@Override
public int run(String[] args) throws Exception {
if (args.length == 0) {
System.err.println("Usage: MigrationTool <S3 file system URI>");
System.err.println("\t<S3 file system URI>\tfilesystem to migrate");
ToolRunner.printGenericCommandUsage(System.err);
return -1;
}
URI uri = URI.create(args[0]);
initialize(uri);
FileSystemStore newStore = new Jets3tFileSystemStore();
newStore.initialize(uri, getConf());
if (get("%2F") != null) {
System.err.println("Current version number is [unversioned].");
System.err.println("Target version number is " +
newStore.getVersion() + ".");
Store oldStore = new UnversionedStore();
migrate(oldStore, newStore);
return 0;
} else {
S3Object root = get("/");
if (root != null) {
String version = (String) root.getMetadata("fs-version");
if (version == null) {
System.err.println("Can't detect version - exiting.");
} else {
String newVersion = newStore.getVersion();
System.err.println("Current version number is " + version + ".");
System.err.println("Target version number is " + newVersion + ".");
if (version.equals(newStore.getVersion())) {
System.err.println("No migration required.");
return 0;
}
// use version number to create Store
//Store oldStore = ...
//migrate(oldStore, newStore);
System.err.println("Not currently implemented.");
return 0;
}
}
System.err.println("Can't detect version - exiting.");
return 0;
}
}
public void initialize(URI uri) throws IOException {
try {
String accessKey = null;
String secretAccessKey = null;
String userInfo = uri.getUserInfo();
if (userInfo != null) {
int index = userInfo.indexOf(':');
if (index != -1) {
accessKey = userInfo.substring(0, index);
secretAccessKey = userInfo.substring(index + 1);
} else {
accessKey = userInfo;
}
}
if (accessKey == null) {
accessKey = getConf().get("fs.s3.awsAccessKeyId");
}
if (secretAccessKey == null) {
secretAccessKey = getConf().get("fs.s3.awsSecretAccessKey");
}
if (accessKey == null && secretAccessKey == null) {
throw new IllegalArgumentException("AWS " +
"Access Key ID and Secret Access Key " +
"must be specified as the username " +
"or password (respectively) of a s3 URL, " +
"or by setting the " +
"fs.s3.awsAccessKeyId or " +
"fs.s3.awsSecretAccessKey properties (respectively).");
} else if (accessKey == null) {
throw new IllegalArgumentException("AWS " +
"Access Key ID must be specified " +
"as the username of a s3 URL, or by setting the " +
"fs.s3.awsAccessKeyId property.");
} else if (secretAccessKey == null) {
throw new IllegalArgumentException("AWS " +
"Secret Access Key must be specified " +
"as the password of a s3 URL, or by setting the " +
"fs.s3.awsSecretAccessKey property.");
}
AWSCredentials awsCredentials =
new AWSCredentials(accessKey, secretAccessKey);
this.s3Service = new RestS3Service(awsCredentials);
} catch (S3ServiceException e) {
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
}
throw new S3Exception(e);
}
bucket = new S3Bucket(uri.getHost());
}
private void migrate(Store oldStore, FileSystemStore newStore)
throws IOException {
for (Path path : oldStore.listAllPaths()) {
INode inode = oldStore.retrieveINode(path);
oldStore.deleteINode(path);
newStore.storeINode(path, inode);
}
}
private S3Object get(String key) {
try {
return s3Service.getObject(bucket.getName(), key);
} catch (S3ServiceException e) {
if ("NoSuchKey".equals(e.getS3ErrorCode())) {
return null;
}
}
return null;
}
interface Store {
Set<Path> listAllPaths() throws IOException;
INode retrieveINode(Path path) throws IOException;
void deleteINode(Path path) throws IOException;
}
class UnversionedStore implements Store {
@Override
public Set<Path> listAllPaths() throws IOException {
try {
String prefix = urlEncode(Path.SEPARATOR);
S3Object[] objects = s3Service.listObjects(bucket.getName(), prefix, null);
Set<Path> prefixes = new TreeSet<Path>();
for (int i = 0; i < objects.length; i++) {
prefixes.add(keyToPath(objects[i].getKey()));
}
return prefixes;
} catch (S3ServiceException e) {
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
}
throw new S3Exception(e);
}
}
@Override
public void deleteINode(Path path) throws IOException {
delete(pathToKey(path));
}
private void delete(String key) throws IOException {
try {
s3Service.deleteObject(bucket, key);
} catch (S3ServiceException e) {
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
}
throw new S3Exception(e);
}
}
@Override
public INode retrieveINode(Path path) throws IOException {
return INode.deserialize(get(pathToKey(path)));
}
private InputStream get(String key) throws IOException {
try {
S3Object object = s3Service.getObject(bucket.getName(), key);
return object.getDataInputStream();
} catch (S3ServiceException e) {
if ("NoSuchKey".equals(e.getS3ErrorCode())) {
return null;
}
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
}
throw new S3Exception(e);
} catch (ServiceException e) {
return null;
}
}
private String pathToKey(Path path) {
if (!path.isAbsolute()) {
throw new IllegalArgumentException("Path must be absolute: " + path);
}
return urlEncode(path.toUri().getPath());
}
private Path keyToPath(String key) {
return new Path(urlDecode(key));
}
private String urlEncode(String s) {
try {
return URLEncoder.encode(s, "UTF-8");
} catch (UnsupportedEncodingException e) {
// Should never happen since every implementation of the Java Platform
// is required to support UTF-8.
// See http://java.sun.com/j2se/1.5.0/docs/api/java/nio/charset/Charset.html
throw new IllegalStateException(e);
}
}
private String urlDecode(String s) {
try {
return URLDecoder.decode(s, "UTF-8");
} catch (UnsupportedEncodingException e) {
// Should never happen since every implementation of the Java Platform
// is required to support UTF-8.
// See http://java.sun.com/j2se/1.5.0/docs/api/java/nio/charset/Charset.html
throw new IllegalStateException(e);
}
}
}
}
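/*
 * A command-line sketch (not part of the original class); the bucket name is a
 * placeholder and credentials are taken from the usual fs.s3.* properties:
 *
 *   hadoop org.apache.hadoop.fs.s3.MigrationTool s3://mybucket
 *
 * or programmatically, mirroring main():
 *
 *   int exitCode = ToolRunner.run(new MigrationTool(), new String[] {"s3://mybucket"});
 */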
| 9,961 | 33.116438 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/Block.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Holds metadata about a block of data being stored in a {@link FileSystemStore}.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class Block {
private long id;
private long length;
public Block(long id, long length) {
this.id = id;
this.length = length;
}
public long getId() {
return id;
}
public long getLength() {
return length;
}
@Override
public String toString() {
return "Block[" + id + ", " + length + "]";
}
}
| 1,454 | 26.45283 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.s3native.NativeS3FileSystem;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.util.Progressable;
/**
* A block-based {@link FileSystem} backed by
* <a href="http://aws.amazon.com/s3">Amazon S3</a>.
*
* @see NativeS3FileSystem
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class S3FileSystem extends FileSystem {
private URI uri;
private FileSystemStore store;
private Path workingDir;
public S3FileSystem() {
// set store in initialize()
}
public S3FileSystem(FileSystemStore store) {
this.store = store;
}
/**
* Return the protocol scheme for the FileSystem.
*
* @return <code>s3</code>
*/
@Override
public String getScheme() {
return "s3";
}
@Override
public URI getUri() {
return uri;
}
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
super.initialize(uri, conf);
if (store == null) {
store = createDefaultStore(conf);
}
store.initialize(uri, conf);
setConf(conf);
this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
this.workingDir =
new Path("/user", System.getProperty("user.name")).makeQualified(this);
}
private static FileSystemStore createDefaultStore(Configuration conf) {
FileSystemStore store = new Jets3tFileSystemStore();
RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
conf.getInt("fs.s3.maxRetries", 4),
conf.getLong("fs.s3.sleepTimeSeconds", 10), TimeUnit.SECONDS);
Map<Class<? extends Exception>,RetryPolicy> exceptionToPolicyMap =
new HashMap<Class<? extends Exception>, RetryPolicy>();
exceptionToPolicyMap.put(IOException.class, basePolicy);
exceptionToPolicyMap.put(S3Exception.class, basePolicy);
RetryPolicy methodPolicy = RetryPolicies.retryByException(
RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
Map<String,RetryPolicy> methodNameToPolicyMap = new HashMap<String,RetryPolicy>();
methodNameToPolicyMap.put("storeBlock", methodPolicy);
methodNameToPolicyMap.put("retrieveBlock", methodPolicy);
return (FileSystemStore) RetryProxy.create(FileSystemStore.class,
store, methodNameToPolicyMap);
}
@Override
public Path getWorkingDirectory() {
return workingDir;
}
@Override
public void setWorkingDirectory(Path dir) {
workingDir = makeAbsolute(dir);
}
private Path makeAbsolute(Path path) {
if (path.isAbsolute()) {
return path;
}
return new Path(workingDir, path);
}
/**
* @param permission Currently ignored.
*/
@Override
public boolean mkdirs(Path path, FsPermission permission) throws IOException {
Path absolutePath = makeAbsolute(path);
List<Path> paths = new ArrayList<Path>();
do {
paths.add(0, absolutePath);
absolutePath = absolutePath.getParent();
} while (absolutePath != null);
boolean result = true;
for (Path p : paths) {
result &= mkdir(p);
}
return result;
}
private boolean mkdir(Path path) throws IOException {
Path absolutePath = makeAbsolute(path);
INode inode = store.retrieveINode(absolutePath);
if (inode == null) {
store.storeINode(absolutePath, INode.DIRECTORY_INODE);
} else if (inode.isFile()) {
throw new IOException(String.format(
"Can't make directory for path %s since it is a file.",
absolutePath));
}
return true;
}
@Override
public boolean isFile(Path path) throws IOException {
INode inode = store.retrieveINode(makeAbsolute(path));
if (inode == null) {
return false;
}
return inode.isFile();
}
private INode checkFile(Path path) throws IOException {
INode inode = store.retrieveINode(makeAbsolute(path));
if (inode == null) {
throw new IOException("No such file.");
}
if (inode.isDirectory()) {
throw new IOException("Path " + path + " is a directory.");
}
return inode;
}
@Override
public FileStatus[] listStatus(Path f) throws IOException {
Path absolutePath = makeAbsolute(f);
INode inode = store.retrieveINode(absolutePath);
if (inode == null) {
throw new FileNotFoundException("File " + f + " does not exist.");
}
if (inode.isFile()) {
return new FileStatus[] {
new S3FileStatus(f.makeQualified(this), inode)
};
}
ArrayList<FileStatus> ret = new ArrayList<FileStatus>();
for (Path p : store.listSubPaths(absolutePath)) {
ret.add(getFileStatus(p.makeQualified(this)));
}
return ret.toArray(new FileStatus[0]);
}
/** This optional operation is not yet supported. */
@Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
throw new IOException("Not supported");
}
/**
* @param permission Currently ignored.
*/
@Override
public FSDataOutputStream create(Path file, FsPermission permission,
boolean overwrite, int bufferSize,
short replication, long blockSize, Progressable progress)
throws IOException {
INode inode = store.retrieveINode(makeAbsolute(file));
if (inode != null) {
if (overwrite) {
delete(file, true);
} else {
throw new FileAlreadyExistsException("File already exists: " + file);
}
} else {
Path parent = file.getParent();
if (parent != null) {
if (!mkdirs(parent)) {
throw new IOException("Mkdirs failed to create " + parent.toString());
}
}
}
return new FSDataOutputStream
(new S3OutputStream(getConf(), store, makeAbsolute(file),
blockSize, progress, bufferSize),
statistics);
}
@Override
public FSDataInputStream open(Path path, int bufferSize) throws IOException {
INode inode = checkFile(path);
return new FSDataInputStream(new S3InputStream(getConf(), store, inode,
statistics));
}
@Override
public boolean rename(Path src, Path dst) throws IOException {
Path absoluteSrc = makeAbsolute(src);
INode srcINode = store.retrieveINode(absoluteSrc);
if (srcINode == null) {
// src path doesn't exist
return false;
}
Path absoluteDst = makeAbsolute(dst);
INode dstINode = store.retrieveINode(absoluteDst);
if (dstINode != null && dstINode.isDirectory()) {
absoluteDst = new Path(absoluteDst, absoluteSrc.getName());
dstINode = store.retrieveINode(absoluteDst);
}
if (dstINode != null) {
// dst path already exists - can't overwrite
return false;
}
Path dstParent = absoluteDst.getParent();
if (dstParent != null) {
INode dstParentINode = store.retrieveINode(dstParent);
if (dstParentINode == null || dstParentINode.isFile()) {
// dst parent doesn't exist or is a file
return false;
}
}
return renameRecursive(absoluteSrc, absoluteDst);
}
private boolean renameRecursive(Path src, Path dst) throws IOException {
INode srcINode = store.retrieveINode(src);
store.storeINode(dst, srcINode);
store.deleteINode(src);
if (srcINode.isDirectory()) {
for (Path oldSrc : store.listDeepSubPaths(src)) {
INode inode = store.retrieveINode(oldSrc);
if (inode == null) {
return false;
}
String oldSrcPath = oldSrc.toUri().getPath();
String srcPath = src.toUri().getPath();
String dstPath = dst.toUri().getPath();
Path newDst = new Path(oldSrcPath.replaceFirst(srcPath, dstPath));
store.storeINode(newDst, inode);
store.deleteINode(oldSrc);
}
}
return true;
}
@Override
public boolean delete(Path path, boolean recursive) throws IOException {
Path absolutePath = makeAbsolute(path);
INode inode = store.retrieveINode(absolutePath);
if (inode == null) {
return false;
}
if (inode.isFile()) {
store.deleteINode(absolutePath);
for (Block block: inode.getBlocks()) {
store.deleteBlock(block);
}
} else {
FileStatus[] contents = null;
try {
contents = listStatus(absolutePath);
} catch(FileNotFoundException fnfe) {
return false;
}
if ((contents.length !=0) && (!recursive)) {
throw new IOException("Directory " + path.toString()
+ " is not empty.");
}
for (FileStatus p:contents) {
if (!delete(p.getPath(), recursive)) {
return false;
}
}
store.deleteINode(absolutePath);
}
return true;
}
  /**
   * Return the {@link FileStatus} of a path in this S3 file system.
   */
@Override
public FileStatus getFileStatus(Path f) throws IOException {
INode inode = store.retrieveINode(makeAbsolute(f));
if (inode == null) {
throw new FileNotFoundException(f + ": No such file or directory.");
}
return new S3FileStatus(f.makeQualified(this), inode);
}
@Override
public long getDefaultBlockSize() {
return getConf().getLong("fs.s3.block.size", 64 * 1024 * 1024);
}
@Override
public String getCanonicalServiceName() {
// Does not support Token
return null;
}
// diagnostic methods
void dump() throws IOException {
store.dump();
}
void purge() throws IOException {
store.purge();
}
private static class S3FileStatus extends FileStatus {
S3FileStatus(Path f, INode inode) throws IOException {
super(findLength(inode), inode.isDirectory(), 1,
findBlocksize(inode), 0, f);
}
private static long findLength(INode inode) {
if (!inode.isDirectory()) {
long length = 0L;
for (Block block : inode.getBlocks()) {
length += block.getLength();
}
return length;
}
return 0;
}
private static long findBlocksize(INode inode) {
final Block[] ret = inode.getBlocks();
return ret == null ? 0L : ret[0].getLength();
}
}
}
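/*
 * A minimal usage sketch (not part of the original class). The bucket name and
 * credentials are placeholders; it assumes the s3 scheme is mapped to this
 * class in the configuration defaults (IOExceptions omitted):
 *
 *   Configuration conf = new Configuration();
 *   conf.set("fs.s3.awsAccessKeyId", "EXAMPLE_ACCESS_KEY");
 *   conf.set("fs.s3.awsSecretAccessKey", "EXAMPLE_SECRET_KEY");
 *   FileSystem fs = FileSystem.get(URI.create("s3://mybucket/"), conf);
 *   try (FSDataOutputStream out = fs.create(new Path("/demo.txt"))) {
 *     out.writeUTF("hello");
 *   }
 *   FileStatus status = fs.getFileStatus(new Path("/demo.txt"));
 */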
| 11,949 | 29.484694 | 141 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3InputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileSystem;
@InterfaceAudience.Private
@InterfaceStability.Unstable
class S3InputStream extends FSInputStream {
private FileSystemStore store;
private Block[] blocks;
private boolean closed;
private long fileLength;
private long pos = 0;
private File blockFile;
private DataInputStream blockStream;
private long blockEnd = -1;
private FileSystem.Statistics stats;
private static final Log LOG =
LogFactory.getLog(S3InputStream.class.getName());
@Deprecated
public S3InputStream(Configuration conf, FileSystemStore store,
INode inode) {
this(conf, store, inode, null);
}
public S3InputStream(Configuration conf, FileSystemStore store,
INode inode, FileSystem.Statistics stats) {
this.store = store;
this.stats = stats;
this.blocks = inode.getBlocks();
for (Block block : blocks) {
this.fileLength += block.getLength();
}
}
@Override
public synchronized long getPos() throws IOException {
return pos;
}
@Override
public synchronized int available() throws IOException {
return (int) (fileLength - pos);
}
@Override
public synchronized void seek(long targetPos) throws IOException {
if (targetPos > fileLength) {
throw new IOException("Cannot seek after EOF");
}
pos = targetPos;
blockEnd = -1;
}
@Override
public synchronized boolean seekToNewSource(long targetPos) throws IOException {
return false;
}
@Override
public synchronized int read() throws IOException {
if (closed) {
throw new IOException("Stream closed");
}
int result = -1;
if (pos < fileLength) {
if (pos > blockEnd) {
blockSeekTo(pos);
}
result = blockStream.read();
if (result >= 0) {
pos++;
}
}
if (stats != null && result >= 0) {
stats.incrementBytesRead(1);
}
return result;
}
@Override
public synchronized int read(byte buf[], int off, int len) throws IOException {
if (closed) {
throw new IOException("Stream closed");
}
if (pos < fileLength) {
if (pos > blockEnd) {
blockSeekTo(pos);
}
int realLen = (int) Math.min((long) len, (blockEnd - pos + 1L));
int result = blockStream.read(buf, off, realLen);
if (result >= 0) {
pos += result;
}
if (stats != null && result > 0) {
stats.incrementBytesRead(result);
}
return result;
}
return -1;
}
private synchronized void blockSeekTo(long target) throws IOException {
//
// Compute desired block
//
int targetBlock = -1;
long targetBlockStart = 0;
long targetBlockEnd = 0;
for (int i = 0; i < blocks.length; i++) {
long blockLength = blocks[i].getLength();
targetBlockEnd = targetBlockStart + blockLength - 1;
if (target >= targetBlockStart && target <= targetBlockEnd) {
targetBlock = i;
break;
} else {
targetBlockStart = targetBlockEnd + 1;
}
}
if (targetBlock < 0) {
throw new IOException(
"Impossible situation: could not find target position " + target);
}
long offsetIntoBlock = target - targetBlockStart;
// read block blocks[targetBlock] from position offsetIntoBlock
this.blockFile = store.retrieveBlock(blocks[targetBlock], offsetIntoBlock);
this.pos = target;
this.blockEnd = targetBlockEnd;
this.blockStream = new DataInputStream(new FileInputStream(blockFile));
}
@Override
public void close() throws IOException {
if (closed) {
return;
}
if (blockStream != null) {
blockStream.close();
blockStream = null;
}
if (blockFile != null) {
boolean b = blockFile.delete();
if (!b) {
LOG.warn("Ignoring failed delete");
}
}
super.close();
closed = true;
}
/**
* We don't support marks.
*/
@Override
public boolean markSupported() {
return false;
}
@Override
public void mark(int readLimit) {
// Do nothing
}
@Override
public void reset() throws IOException {
throw new IOException("Mark not supported");
}
}
| 5,496 | 24.449074 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/PartialListing.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3native;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* <p>
* Holds information on a directory listing for a
* {@link NativeFileSystemStore}.
* This includes the {@link FileMetadata files} and directories
* (their names) contained in a directory.
* </p>
* <p>
* This listing may be returned in chunks, so a <code>priorLastKey</code>
* is provided so that the next chunk may be requested.
* </p>
* @see NativeFileSystemStore#list(String, int, String)
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
class PartialListing {
private final String priorLastKey;
private final FileMetadata[] files;
private final String[] commonPrefixes;
public PartialListing(String priorLastKey, FileMetadata[] files,
String[] commonPrefixes) {
this.priorLastKey = priorLastKey;
this.files = files;
this.commonPrefixes = commonPrefixes;
}
public FileMetadata[] getFiles() {
return files;
}
public String[] getCommonPrefixes() {
return commonPrefixes;
}
public String getPriorLastKey() {
return priorLastKey;
}
}
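// A minimal usage sketch (illustrative, not part of the original class) of
// paging through chunked listings via priorLastKey; it assumes an initialized
// NativeFileSystemStore named "store" and a key prefix named "prefix":
//
//   String priorLastKey = null;
//   do {
//     PartialListing listing = store.list(prefix, 1000, priorLastKey, true);
//     for (FileMetadata file : listing.getFiles()) {
//       // ... handle each file ...
//     }
//     priorLastKey = listing.getPriorLastKey();
//   } while (priorLastKey != null);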
| 1,995 | 29.707692 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3native;
import static org.apache.hadoop.fs.s3native.NativeS3FileSystem.PATH_DELIMITER;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.s3.S3Credentials;
import org.apache.hadoop.fs.s3.S3Exception;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.AccessControlException;
import org.jets3t.service.S3Service;
import org.jets3t.service.S3ServiceException;
import org.jets3t.service.ServiceException;
import org.jets3t.service.StorageObjectsChunk;
import org.jets3t.service.impl.rest.HttpException;
import org.jets3t.service.impl.rest.httpclient.RestS3Service;
import org.jets3t.service.model.MultipartPart;
import org.jets3t.service.model.MultipartUpload;
import org.jets3t.service.model.S3Bucket;
import org.jets3t.service.model.S3Object;
import org.jets3t.service.model.StorageObject;
import org.jets3t.service.security.AWSCredentials;
import org.jets3t.service.utils.MultipartUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@InterfaceAudience.Private
@InterfaceStability.Unstable
class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
private S3Service s3Service;
private S3Bucket bucket;
private long multipartBlockSize;
private boolean multipartEnabled;
private long multipartCopyBlockSize;
static final long MAX_PART_SIZE = (long)5 * 1024 * 1024 * 1024;
private String serverSideEncryptionAlgorithm;
public static final Logger LOG =
LoggerFactory.getLogger(Jets3tNativeFileSystemStore.class);
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
S3Credentials s3Credentials = new S3Credentials();
s3Credentials.initialize(uri, conf);
try {
AWSCredentials awsCredentials =
new AWSCredentials(s3Credentials.getAccessKey(),
s3Credentials.getSecretAccessKey());
this.s3Service = new RestS3Service(awsCredentials);
} catch (S3ServiceException e) {
handleException(e);
}
multipartEnabled =
conf.getBoolean("fs.s3n.multipart.uploads.enabled", false);
multipartBlockSize = Math.min(
conf.getLong("fs.s3n.multipart.uploads.block.size", 64 * 1024 * 1024),
MAX_PART_SIZE);
multipartCopyBlockSize = Math.min(
conf.getLong("fs.s3n.multipart.copy.block.size", MAX_PART_SIZE),
MAX_PART_SIZE);
serverSideEncryptionAlgorithm = conf.get("fs.s3n.server-side-encryption-algorithm");
bucket = new S3Bucket(uri.getHost());
}
@Override
public void storeFile(String key, File file, byte[] md5Hash)
throws IOException {
if (multipartEnabled && file.length() >= multipartBlockSize) {
storeLargeFile(key, file, md5Hash);
return;
}
BufferedInputStream in = null;
try {
in = new BufferedInputStream(new FileInputStream(file));
S3Object object = new S3Object(key);
object.setDataInputStream(in);
object.setContentType("binary/octet-stream");
object.setContentLength(file.length());
object.setServerSideEncryptionAlgorithm(serverSideEncryptionAlgorithm);
if (md5Hash != null) {
object.setMd5Hash(md5Hash);
}
s3Service.putObject(bucket, object);
} catch (ServiceException e) {
handleException(e, key);
} finally {
IOUtils.closeStream(in);
}
}
public void storeLargeFile(String key, File file, byte[] md5Hash)
throws IOException {
S3Object object = new S3Object(key);
object.setDataInputFile(file);
object.setContentType("binary/octet-stream");
object.setContentLength(file.length());
object.setServerSideEncryptionAlgorithm(serverSideEncryptionAlgorithm);
if (md5Hash != null) {
object.setMd5Hash(md5Hash);
}
List<StorageObject> objectsToUploadAsMultipart =
new ArrayList<StorageObject>();
objectsToUploadAsMultipart.add(object);
MultipartUtils mpUtils = new MultipartUtils(multipartBlockSize);
try {
mpUtils.uploadObjects(bucket.getName(), s3Service,
objectsToUploadAsMultipart, null);
} catch (Exception e) {
handleException(e, key);
}
}
@Override
public void storeEmptyFile(String key) throws IOException {
try {
S3Object object = new S3Object(key);
object.setDataInputStream(new ByteArrayInputStream(new byte[0]));
object.setContentType("binary/octet-stream");
object.setContentLength(0);
object.setServerSideEncryptionAlgorithm(serverSideEncryptionAlgorithm);
s3Service.putObject(bucket, object);
} catch (ServiceException e) {
handleException(e, key);
}
}
@Override
public FileMetadata retrieveMetadata(String key) throws IOException {
StorageObject object = null;
try {
LOG.debug("Getting metadata for key: {} from bucket: {}",
key, bucket.getName());
object = s3Service.getObjectDetails(bucket.getName(), key);
return new FileMetadata(key, object.getContentLength(),
object.getLastModifiedDate().getTime());
} catch (ServiceException e) {
try {
        // attempt to translate the exception into an IOException
handleException(e, key);
return null;
} catch (FileNotFoundException fnfe) {
        // and downgrade missing keys to a null result
return null;
}
} finally {
if (object != null) {
object.closeDataInputStream();
}
}
}
/**
   * @param key
   *          the name of the object to retrieve from the S3 bucket
   * @return an input stream over the object's data, or null if the key is
   *         not found
* @throws IOException
*/
@Override
public InputStream retrieve(String key) throws IOException {
try {
LOG.debug("Getting key: {} from bucket: {}",
key, bucket.getName());
S3Object object = s3Service.getObject(bucket.getName(), key);
return object.getDataInputStream();
} catch (ServiceException e) {
handleException(e, key);
return null; //return null if key not found
}
}
/**
*
   * @param key
   *          the name of the object to retrieve from the S3 bucket
   * @param byteRangeStart the byte offset at which to start reading
   * @return an input stream over the object's data starting at the given
   *         offset, or null if the key is not found
* @throws IOException
*/
@Override
public InputStream retrieve(String key, long byteRangeStart)
throws IOException {
try {
LOG.debug("Getting key: {} from bucket: {} with byteRangeStart: {}",
key, bucket.getName(), byteRangeStart);
S3Object object = s3Service.getObject(bucket, key, null, null, null,
null, byteRangeStart, null);
return object.getDataInputStream();
} catch (ServiceException e) {
handleException(e, key);
return null;
}
}
@Override
public PartialListing list(String prefix, int maxListingLength)
throws IOException {
return list(prefix, maxListingLength, null, false);
}
@Override
public PartialListing list(String prefix, int maxListingLength, String priorLastKey,
boolean recurse) throws IOException {
return list(prefix, recurse ? null : PATH_DELIMITER, maxListingLength, priorLastKey);
}
/**
* list objects
* @param prefix prefix
* @param delimiter delimiter
* @param maxListingLength max no. of entries
* @param priorLastKey last key in any previous search
* @return a list of matches
* @throws IOException on any reported failure
*/
private PartialListing list(String prefix, String delimiter,
int maxListingLength, String priorLastKey) throws IOException {
try {
if (!prefix.isEmpty() && !prefix.endsWith(PATH_DELIMITER)) {
prefix += PATH_DELIMITER;
}
StorageObjectsChunk chunk = s3Service.listObjectsChunked(bucket.getName(),
prefix, delimiter, maxListingLength, priorLastKey);
FileMetadata[] fileMetadata =
new FileMetadata[chunk.getObjects().length];
for (int i = 0; i < fileMetadata.length; i++) {
StorageObject object = chunk.getObjects()[i];
fileMetadata[i] = new FileMetadata(object.getKey(),
object.getContentLength(), object.getLastModifiedDate().getTime());
}
return new PartialListing(chunk.getPriorLastKey(), fileMetadata,
chunk.getCommonPrefixes());
} catch (ServiceException e) {
handleException(e, prefix);
return null; // never returned - keep compiler happy
}
}
@Override
public void delete(String key) throws IOException {
try {
LOG.debug("Deleting key: {} from bucket: {}",
key, bucket.getName());
s3Service.deleteObject(bucket, key);
} catch (ServiceException e) {
handleException(e, key);
}
}
public void rename(String srcKey, String dstKey) throws IOException {
try {
s3Service.renameObject(bucket.getName(), srcKey, new S3Object(dstKey));
} catch (ServiceException e) {
handleException(e, srcKey);
}
}
@Override
public void copy(String srcKey, String dstKey) throws IOException {
try {
if(LOG.isDebugEnabled()) {
LOG.debug("Copying srcKey: " + srcKey + "to dstKey: " + dstKey + "in bucket: " + bucket.getName());
}
if (multipartEnabled) {
S3Object object = s3Service.getObjectDetails(bucket, srcKey, null,
null, null, null);
if (multipartCopyBlockSize > 0 &&
object.getContentLength() > multipartCopyBlockSize) {
copyLargeFile(object, dstKey);
return;
}
}
S3Object dstObject = new S3Object(dstKey);
dstObject.setServerSideEncryptionAlgorithm(serverSideEncryptionAlgorithm);
s3Service.copyObject(bucket.getName(), srcKey, bucket.getName(),
dstObject, false);
} catch (ServiceException e) {
handleException(e, srcKey);
}
}
public void copyLargeFile(S3Object srcObject, String dstKey) throws IOException {
try {
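      // Number of copy parts = ceil(contentLength / multipartCopyBlockSize);
      // the final part carries the remainder (or a full block if it divides evenly).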
long partCount = srcObject.getContentLength() / multipartCopyBlockSize +
(srcObject.getContentLength() % multipartCopyBlockSize > 0 ? 1 : 0);
MultipartUpload multipartUpload = s3Service.multipartStartUpload
(bucket.getName(), dstKey, srcObject.getMetadataMap());
List<MultipartPart> listedParts = new ArrayList<MultipartPart>();
for (int i = 0; i < partCount; i++) {
long byteRangeStart = i * multipartCopyBlockSize;
long byteLength;
if (i < partCount - 1) {
byteLength = multipartCopyBlockSize;
} else {
byteLength = srcObject.getContentLength() % multipartCopyBlockSize;
if (byteLength == 0) {
byteLength = multipartCopyBlockSize;
}
}
MultipartPart copiedPart = s3Service.multipartUploadPartCopy
(multipartUpload, i + 1, bucket.getName(), srcObject.getKey(),
null, null, null, null, byteRangeStart,
byteRangeStart + byteLength - 1, null);
listedParts.add(copiedPart);
}
Collections.reverse(listedParts);
s3Service.multipartCompleteUpload(multipartUpload, listedParts);
} catch (ServiceException e) {
handleException(e, srcObject.getKey());
}
}
@Override
public void purge(String prefix) throws IOException {
String key = "";
try {
S3Object[] objects =
s3Service.listObjects(bucket.getName(), prefix, null);
for (S3Object object : objects) {
key = object.getKey();
s3Service.deleteObject(bucket, key);
}
} catch (S3ServiceException e) {
handleException(e, key);
}
}
@Override
public void dump() throws IOException {
StringBuilder sb = new StringBuilder("S3 Native Filesystem, ");
sb.append(bucket.getName()).append("\n");
try {
S3Object[] objects = s3Service.listObjects(bucket.getName());
for (S3Object object : objects) {
sb.append(object.getKey()).append("\n");
}
} catch (S3ServiceException e) {
handleException(e);
}
System.out.println(sb);
}
/**
* Handle any service exception by translating it into an IOException
* @param e exception
* @throws IOException exception -always
*/
private void handleException(Exception e) throws IOException {
throw processException(e, e, "");
}
/**
* Handle any service exception by translating it into an IOException
* @param e exception
* @param key key sought from object store
* @throws IOException exception -always
*/
private void handleException(Exception e, String key) throws IOException {
throw processException(e, e, key);
}
/**
* Handle any service exception by translating it into an IOException
* @param thrown exception
* @param original original exception -thrown if no other translation could
* be made
* @param key key sought from object store or "" for undefined
   * @return an exception to throw -never null
*/
private IOException processException(Throwable thrown, Throwable original,
String key) {
IOException result;
if (thrown.getCause() != null) {
// recurse down
result = processException(thrown.getCause(), original, key);
} else if (thrown instanceof HttpException) {
// nested HttpException - examine error code and react
HttpException httpException = (HttpException) thrown;
String responseMessage = httpException.getResponseMessage();
int responseCode = httpException.getResponseCode();
String bucketName = "s3n://" + bucket.getName();
String text = String.format("%s : %03d : %s",
bucketName,
responseCode,
responseMessage);
String filename = !key.isEmpty() ? (bucketName + "/" + key) : text;
IOException ioe;
switch (responseCode) {
case 404:
result = new FileNotFoundException(filename);
break;
case 416: // invalid range
result = new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF
+": " + filename);
break;
case 403: //forbidden
result = new AccessControlException("Permission denied"
+": " + filename);
break;
default:
result = new IOException(text);
}
result.initCause(thrown);
} else if (thrown instanceof S3ServiceException) {
S3ServiceException se = (S3ServiceException) thrown;
LOG.debug(
"S3ServiceException: {}: {} : {}",
se.getS3ErrorCode(), se.getS3ErrorMessage(), se, se);
if ("InvalidRange".equals(se.getS3ErrorCode())) {
result = new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
} else {
result = new S3Exception(se);
}
} else if (thrown instanceof ServiceException) {
ServiceException se = (ServiceException) thrown;
LOG.debug("S3ServiceException: {}: {} : {}",
se.getErrorCode(), se.toString(), se, se);
result = new S3Exception(se);
} else if (thrown instanceof IOException) {
result = (IOException) thrown;
} else {
// here there is no exception derived yet.
// this means no inner cause, and no translation made yet.
// convert the original to an IOException -rather than just the
// exception at the base of the tree
result = new S3Exception(original);
}
return result;
}
}
| 16,773 | 33.657025 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3native;
import java.io.BufferedOutputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.security.DigestOutputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BufferedFSInputStream;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.s3.S3Exception;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.util.Progressable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A {@link FileSystem} for reading and writing files stored on
* <a href="http://aws.amazon.com/s3">Amazon S3</a>.
* Unlike {@link org.apache.hadoop.fs.s3.S3FileSystem} this implementation
* stores files on S3 in their
* native form so they can be read by other S3 tools.
* <p>
* A note about directories. S3 of course has no "native" support for them.
* The idiom we choose then is: for any directory created by this class,
* we use an empty object "#{dirpath}_$folder$" as a marker.
* Further, to interoperate with other S3 tools, we also accept the following:
* <ul>
 * <li>an object "#{dirpath}/" denoting a directory marker</li>
* <li>
* if there exists any objects with the prefix "#{dirpath}/", then the
* directory is said to exist
* </li>
* <li>
* if both a file with the name of a directory and a marker for that
* directory exists, then the *file masks the directory*, and the directory
* is never returned.
* </li>
* </ul>
*
* @see org.apache.hadoop.fs.s3.S3FileSystem
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class NativeS3FileSystem extends FileSystem {
public static final Logger LOG =
LoggerFactory.getLogger(NativeS3FileSystem.class);
private static final String FOLDER_SUFFIX = "_$folder$";
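  // Illustrative example of the marker idiom described above (a sketch, not
  // taken verbatim from the original source): mkdirs(new Path("/logs/2009"))
  // stores an empty marker object "logs/2009_$folder$", and getFileStatus()
  // also treats any object under the prefix "logs/2009/" as evidence that the
  // directory exists.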
static final String PATH_DELIMITER = Path.SEPARATOR;
private static final int S3_MAX_LISTING_LENGTH = 1000;
static class NativeS3FsInputStream extends FSInputStream {
private NativeFileSystemStore store;
private Statistics statistics;
private InputStream in;
private final String key;
private long pos = 0;
public NativeS3FsInputStream(NativeFileSystemStore store, Statistics statistics, InputStream in, String key) {
Preconditions.checkNotNull(in, "Null input stream");
this.store = store;
this.statistics = statistics;
this.in = in;
this.key = key;
}
@Override
public synchronized int read() throws IOException {
int result;
try {
result = in.read();
} catch (IOException e) {
LOG.info("Received IOException while reading '{}', attempting to reopen",
key);
LOG.debug("{}", e, e);
try {
reopen(pos);
result = in.read();
} catch (EOFException eof) {
LOG.debug("EOF on input stream read: {}", eof, eof);
result = -1;
}
}
if (result != -1) {
pos++;
}
if (statistics != null && result != -1) {
statistics.incrementBytesRead(1);
}
return result;
}
@Override
public synchronized int read(byte[] b, int off, int len)
throws IOException {
if (in == null) {
throw new EOFException("Cannot read closed stream");
}
int result = -1;
try {
result = in.read(b, off, len);
} catch (EOFException eof) {
throw eof;
} catch (IOException e) {
LOG.info( "Received IOException while reading '{}'," +
" attempting to reopen.", key);
reopen(pos);
result = in.read(b, off, len);
}
if (result > 0) {
pos += result;
}
if (statistics != null && result > 0) {
statistics.incrementBytesRead(result);
}
return result;
}
@Override
public synchronized void close() throws IOException {
closeInnerStream();
}
/**
* Close the inner stream if not null. Even if an exception
* is raised during the close, the field is set to null
*/
private void closeInnerStream() {
IOUtils.closeStream(in);
in = null;
}
/**
* Reopen a new input stream with the specified position
* @param pos the position to reopen a new stream
* @throws IOException
*/
private synchronized void reopen(long pos) throws IOException {
LOG.debug("Reopening key '{}' for reading at position '{}", key, pos);
InputStream newStream = store.retrieve(key, pos);
updateInnerStream(newStream, pos);
}
/**
* Update inner stream with a new stream and position
* @param newStream new stream -must not be null
* @param newpos new position
* @throws IOException IO exception on a failure to close the existing
* stream.
*/
private synchronized void updateInnerStream(InputStream newStream, long newpos) throws IOException {
Preconditions.checkNotNull(newStream, "Null newstream argument");
closeInnerStream();
in = newStream;
this.pos = newpos;
}
@Override
public synchronized void seek(long newpos) throws IOException {
if (newpos < 0) {
throw new EOFException(
FSExceptionMessages.NEGATIVE_SEEK);
}
if (pos != newpos) {
// the seek is attempting to move the current position
reopen(newpos);
}
}
@Override
public synchronized long getPos() throws IOException {
return pos;
}
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
return false;
}
}
private class NativeS3FsOutputStream extends OutputStream {
private Configuration conf;
private String key;
private File backupFile;
private OutputStream backupStream;
private MessageDigest digest;
private boolean closed;
private LocalDirAllocator lDirAlloc;
public NativeS3FsOutputStream(Configuration conf,
NativeFileSystemStore store, String key, Progressable progress,
int bufferSize) throws IOException {
this.conf = conf;
this.key = key;
this.backupFile = newBackupFile();
LOG.info("OutputStream for key '" + key + "' writing to tempfile '" + this.backupFile + "'");
try {
this.digest = MessageDigest.getInstance("MD5");
this.backupStream = new BufferedOutputStream(new DigestOutputStream(
new FileOutputStream(backupFile), this.digest));
} catch (NoSuchAlgorithmException e) {
LOG.warn("Cannot load MD5 digest algorithm," +
"skipping message integrity check.", e);
this.backupStream = new BufferedOutputStream(
new FileOutputStream(backupFile));
}
}
private File newBackupFile() throws IOException {
if (lDirAlloc == null) {
lDirAlloc = new LocalDirAllocator("fs.s3.buffer.dir");
}
File result = lDirAlloc.createTmpFileForWrite("output-", LocalDirAllocator.SIZE_UNKNOWN, conf);
result.deleteOnExit();
return result;
}
@Override
public void flush() throws IOException {
backupStream.flush();
}
@Override
public synchronized void close() throws IOException {
if (closed) {
return;
}
backupStream.close();
LOG.info("OutputStream for key '{}' closed. Now beginning upload", key);
try {
byte[] md5Hash = digest == null ? null : digest.digest();
store.storeFile(key, backupFile, md5Hash);
} finally {
if (!backupFile.delete()) {
LOG.warn("Could not delete temporary s3n file: " + backupFile);
}
super.close();
closed = true;
}
LOG.info("OutputStream for key '{}' upload complete", key);
}
@Override
public void write(int b) throws IOException {
backupStream.write(b);
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
backupStream.write(b, off, len);
}
}
private URI uri;
private NativeFileSystemStore store;
private Path workingDir;
public NativeS3FileSystem() {
// set store in initialize()
}
public NativeS3FileSystem(NativeFileSystemStore store) {
this.store = store;
}
/**
* Return the protocol scheme for the FileSystem.
*
* @return <code>s3n</code>
*/
@Override
public String getScheme() {
return "s3n";
}
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
super.initialize(uri, conf);
if (store == null) {
store = createDefaultStore(conf);
}
store.initialize(uri, conf);
setConf(conf);
this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
this.workingDir =
new Path("/user", System.getProperty("user.name")).makeQualified(this.uri, this.getWorkingDirectory());
}
private static NativeFileSystemStore createDefaultStore(Configuration conf) {
NativeFileSystemStore store = new Jets3tNativeFileSystemStore();
RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
conf.getInt("fs.s3.maxRetries", 4),
conf.getLong("fs.s3.sleepTimeSeconds", 10), TimeUnit.SECONDS);
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
new HashMap<Class<? extends Exception>, RetryPolicy>();
exceptionToPolicyMap.put(IOException.class, basePolicy);
exceptionToPolicyMap.put(S3Exception.class, basePolicy);
RetryPolicy methodPolicy = RetryPolicies.retryByException(
RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
Map<String, RetryPolicy> methodNameToPolicyMap =
new HashMap<String, RetryPolicy>();
methodNameToPolicyMap.put("storeFile", methodPolicy);
methodNameToPolicyMap.put("rename", methodPolicy);
return (NativeFileSystemStore)
RetryProxy.create(NativeFileSystemStore.class, store,
methodNameToPolicyMap);
}
private static String pathToKey(Path path) {
if (path.toUri().getScheme() != null && path.toUri().getPath().isEmpty()) {
// allow uris without trailing slash after bucket to refer to root,
// like s3n://mybucket
return "";
}
if (!path.isAbsolute()) {
throw new IllegalArgumentException("Path must be absolute: " + path);
}
String ret = path.toUri().getPath().substring(1); // remove initial slash
if (ret.endsWith("/") && (ret.indexOf("/") != ret.length() - 1)) {
ret = ret.substring(0, ret.length() -1);
}
return ret;
}
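  // Illustrative mapping (assumption, for clarity): "s3n://bucket/dir/file"
  // becomes the key "dir/file", while a bare "s3n://bucket" URI maps to the
  // empty key, i.e. the bucket root.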
private static Path keyToPath(String key) {
return new Path("/" + key);
}
private Path makeAbsolute(Path path) {
if (path.isAbsolute()) {
return path;
}
return new Path(workingDir, path);
}
/** This optional operation is not yet supported. */
@Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
throw new IOException("Not supported");
}
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
if (exists(f) && !overwrite) {
throw new FileAlreadyExistsException("File already exists: " + f);
}
if(LOG.isDebugEnabled()) {
LOG.debug("Creating new file '" + f + "' in S3");
}
Path absolutePath = makeAbsolute(f);
String key = pathToKey(absolutePath);
return new FSDataOutputStream(new NativeS3FsOutputStream(getConf(), store,
key, progress, bufferSize), statistics);
}
@Override
public boolean delete(Path f, boolean recurse) throws IOException {
FileStatus status;
try {
status = getFileStatus(f);
} catch (FileNotFoundException e) {
if(LOG.isDebugEnabled()) {
LOG.debug("Delete called for '" + f +
"' but file does not exist, so returning false");
}
return false;
}
Path absolutePath = makeAbsolute(f);
String key = pathToKey(absolutePath);
if (status.isDirectory()) {
if (!recurse && listStatus(f).length > 0) {
throw new IOException("Can not delete " + f + " as is a not empty directory and recurse option is false");
}
createParent(f);
if(LOG.isDebugEnabled()) {
LOG.debug("Deleting directory '" + f + "'");
}
String priorLastKey = null;
do {
PartialListing listing = store.list(key, S3_MAX_LISTING_LENGTH, priorLastKey, true);
for (FileMetadata file : listing.getFiles()) {
store.delete(file.getKey());
}
priorLastKey = listing.getPriorLastKey();
} while (priorLastKey != null);
try {
store.delete(key + FOLDER_SUFFIX);
} catch (FileNotFoundException e) {
//this is fine, we don't require a marker
}
} else {
if(LOG.isDebugEnabled()) {
LOG.debug("Deleting file '" + f + "'");
}
createParent(f);
store.delete(key);
}
return true;
}
@Override
public FileStatus getFileStatus(Path f) throws IOException {
Path absolutePath = makeAbsolute(f);
String key = pathToKey(absolutePath);
if (key.length() == 0) { // root always exists
return newDirectory(absolutePath);
}
if(LOG.isDebugEnabled()) {
LOG.debug("getFileStatus retrieving metadata for key '" + key + "'");
}
FileMetadata meta = store.retrieveMetadata(key);
if (meta != null) {
if(LOG.isDebugEnabled()) {
LOG.debug("getFileStatus returning 'file' for key '" + key + "'");
}
return newFile(meta, absolutePath);
}
if (store.retrieveMetadata(key + FOLDER_SUFFIX) != null) {
if(LOG.isDebugEnabled()) {
LOG.debug("getFileStatus returning 'directory' for key '" + key +
"' as '" + key + FOLDER_SUFFIX + "' exists");
}
return newDirectory(absolutePath);
}
if(LOG.isDebugEnabled()) {
LOG.debug("getFileStatus listing key '" + key + "'");
}
PartialListing listing = store.list(key, 1);
if (listing.getFiles().length > 0 ||
listing.getCommonPrefixes().length > 0) {
if(LOG.isDebugEnabled()) {
LOG.debug("getFileStatus returning 'directory' for key '" + key +
"' as it has contents");
}
return newDirectory(absolutePath);
}
if(LOG.isDebugEnabled()) {
LOG.debug("getFileStatus could not find key '" + key + "'");
}
throw new FileNotFoundException("No such file or directory '" + absolutePath + "'");
}
@Override
public URI getUri() {
return uri;
}
/**
* <p>
* If <code>f</code> is a file, this method will make a single call to S3.
* If <code>f</code> is a directory, this method will make a maximum of
* (<i>n</i> / 1000) + 2 calls to S3, where <i>n</i> is the total number of
* files and directories contained directly in <code>f</code>.
* </p>
*/
@Override
public FileStatus[] listStatus(Path f) throws IOException {
Path absolutePath = makeAbsolute(f);
String key = pathToKey(absolutePath);
if (key.length() > 0) {
FileMetadata meta = store.retrieveMetadata(key);
if (meta != null) {
return new FileStatus[] { newFile(meta, absolutePath) };
}
}
URI pathUri = absolutePath.toUri();
Set<FileStatus> status = new TreeSet<FileStatus>();
String priorLastKey = null;
do {
PartialListing listing = store.list(key, S3_MAX_LISTING_LENGTH, priorLastKey, false);
for (FileMetadata fileMetadata : listing.getFiles()) {
Path subpath = keyToPath(fileMetadata.getKey());
String relativePath = pathUri.relativize(subpath.toUri()).getPath();
if (fileMetadata.getKey().equals(key + "/")) {
// this is just the directory we have been asked to list
}
else if (relativePath.endsWith(FOLDER_SUFFIX)) {
status.add(newDirectory(new Path(
absolutePath,
relativePath.substring(0, relativePath.indexOf(FOLDER_SUFFIX)))));
}
else {
status.add(newFile(fileMetadata, subpath));
}
}
for (String commonPrefix : listing.getCommonPrefixes()) {
Path subpath = keyToPath(commonPrefix);
String relativePath = pathUri.relativize(subpath.toUri()).getPath();
status.add(newDirectory(new Path(absolutePath, relativePath)));
}
priorLastKey = listing.getPriorLastKey();
} while (priorLastKey != null);
if (status.isEmpty() &&
key.length() > 0 &&
store.retrieveMetadata(key + FOLDER_SUFFIX) == null) {
throw new FileNotFoundException("File " + f + " does not exist.");
}
return status.toArray(new FileStatus[status.size()]);
}
private FileStatus newFile(FileMetadata meta, Path path) {
return new FileStatus(meta.getLength(), false, 1, getDefaultBlockSize(),
meta.getLastModified(), path.makeQualified(this.getUri(), this.getWorkingDirectory()));
}
private FileStatus newDirectory(Path path) {
return new FileStatus(0, true, 1, 0, 0, path.makeQualified(this.getUri(), this.getWorkingDirectory()));
}
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
Path absolutePath = makeAbsolute(f);
List<Path> paths = new ArrayList<Path>();
do {
paths.add(0, absolutePath);
absolutePath = absolutePath.getParent();
} while (absolutePath != null);
boolean result = true;
for (Path path : paths) {
result &= mkdir(path);
}
return result;
}
private boolean mkdir(Path f) throws IOException {
try {
FileStatus fileStatus = getFileStatus(f);
if (fileStatus.isFile()) {
throw new FileAlreadyExistsException(String.format(
"Can't make directory for path '%s' since it is a file.", f));
}
} catch (FileNotFoundException e) {
if(LOG.isDebugEnabled()) {
LOG.debug("Making dir '" + f + "' in S3");
}
String key = pathToKey(f) + FOLDER_SUFFIX;
store.storeEmptyFile(key);
}
return true;
}
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
FileStatus fs = getFileStatus(f); // will throw if the file doesn't exist
if (fs.isDirectory()) {
throw new FileNotFoundException("'" + f + "' is a directory");
}
LOG.info("Opening '" + f + "' for reading");
Path absolutePath = makeAbsolute(f);
String key = pathToKey(absolutePath);
return new FSDataInputStream(new BufferedFSInputStream(
new NativeS3FsInputStream(store, statistics, store.retrieve(key), key), bufferSize));
}
// rename() and delete() use this method to ensure that the parent directory
// of the source does not vanish.
private void createParent(Path path) throws IOException {
Path parent = path.getParent();
if (parent != null) {
String key = pathToKey(makeAbsolute(parent));
if (key.length() > 0) {
store.storeEmptyFile(key + FOLDER_SUFFIX);
}
}
}
@Override
public boolean rename(Path src, Path dst) throws IOException {
String srcKey = pathToKey(makeAbsolute(src));
if (srcKey.length() == 0) {
// Cannot rename root of file system
return false;
}
final String debugPreamble = "Renaming '" + src + "' to '" + dst + "' - ";
// Figure out the final destination
String dstKey;
try {
boolean dstIsFile = getFileStatus(dst).isFile();
if (dstIsFile) {
if(LOG.isDebugEnabled()) {
LOG.debug(debugPreamble +
"returning false as dst is an already existing file");
}
return false;
} else {
if(LOG.isDebugEnabled()) {
LOG.debug(debugPreamble + "using dst as output directory");
}
dstKey = pathToKey(makeAbsolute(new Path(dst, src.getName())));
}
} catch (FileNotFoundException e) {
if(LOG.isDebugEnabled()) {
LOG.debug(debugPreamble + "using dst as output destination");
}
dstKey = pathToKey(makeAbsolute(dst));
try {
if (getFileStatus(dst.getParent()).isFile()) {
if(LOG.isDebugEnabled()) {
LOG.debug(debugPreamble +
"returning false as dst parent exists and is a file");
}
return false;
}
} catch (FileNotFoundException ex) {
if(LOG.isDebugEnabled()) {
LOG.debug(debugPreamble +
"returning false as dst parent does not exist");
}
return false;
}
}
boolean srcIsFile;
try {
srcIsFile = getFileStatus(src).isFile();
} catch (FileNotFoundException e) {
if(LOG.isDebugEnabled()) {
LOG.debug(debugPreamble + "returning false as src does not exist");
}
return false;
}
if (srcIsFile) {
if(LOG.isDebugEnabled()) {
LOG.debug(debugPreamble +
"src is file, so doing copy then delete in S3");
}
store.copy(srcKey, dstKey);
store.delete(srcKey);
} else {
if(LOG.isDebugEnabled()) {
LOG.debug(debugPreamble + "src is directory, so copying contents");
}
store.storeEmptyFile(dstKey + FOLDER_SUFFIX);
List<String> keysToDelete = new ArrayList<String>();
String priorLastKey = null;
do {
PartialListing listing = store.list(srcKey, S3_MAX_LISTING_LENGTH, priorLastKey, true);
for (FileMetadata file : listing.getFiles()) {
keysToDelete.add(file.getKey());
store.copy(file.getKey(), dstKey + file.getKey().substring(srcKey.length()));
}
priorLastKey = listing.getPriorLastKey();
} while (priorLastKey != null);
if(LOG.isDebugEnabled()) {
LOG.debug(debugPreamble +
"all files in src copied, now removing src files");
}
for (String key: keysToDelete) {
store.delete(key);
}
try {
store.delete(srcKey + FOLDER_SUFFIX);
} catch (FileNotFoundException e) {
//this is fine, we don't require a marker
}
if(LOG.isDebugEnabled()) {
LOG.debug(debugPreamble + "done");
}
}
return true;
}
@Override
public long getDefaultBlockSize() {
return getConf().getLong("fs.s3n.block.size", 64 * 1024 * 1024);
}
/**
* Set the working directory to the given directory.
*/
@Override
public void setWorkingDirectory(Path newDir) {
workingDir = newDir;
}
@Override
public Path getWorkingDirectory() {
return workingDir;
}
@Override
public String getCanonicalServiceName() {
// Does not support Token
return null;
}
}
| 24,797 | 31.163424 | 114 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3NativeFileSystemConfigKeys.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3native;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
/**
* This class contains constants for configuration keys used
* in the s3 file system.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class S3NativeFileSystemConfigKeys extends CommonConfigurationKeys {
public static final String S3_NATIVE_BLOCK_SIZE_KEY = "s3native.blocksize";
public static final long S3_NATIVE_BLOCK_SIZE_DEFAULT = 64*1024*1024;
public static final String S3_NATIVE_REPLICATION_KEY = "s3native.replication";
public static final short S3_NATIVE_REPLICATION_DEFAULT = 1;
public static final String S3_NATIVE_STREAM_BUFFER_SIZE_KEY =
"s3native.stream-buffer-size";
public static final int S3_NATIVE_STREAM_BUFFER_SIZE_DEFAULT = 4096;
public static final String S3_NATIVE_BYTES_PER_CHECKSUM_KEY =
"s3native.bytes-per-checksum";
public static final int S3_NATIVE_BYTES_PER_CHECKSUM_DEFAULT = 512;
public static final String S3_NATIVE_CLIENT_WRITE_PACKET_SIZE_KEY =
"s3native.client-write-packet-size";
public static final int S3_NATIVE_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
}
| 2,250 | 45.895833 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/FileMetadata.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3native;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* <p>
* Holds basic metadata for a file stored in a {@link NativeFileSystemStore}.
* </p>
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
class FileMetadata {
private final String key;
private final long length;
private final long lastModified;
public FileMetadata(String key, long length, long lastModified) {
this.key = key;
this.length = length;
this.lastModified = lastModified;
}
public String getKey() {
return key;
}
public long getLength() {
return length;
}
public long getLastModified() {
return lastModified;
}
@Override
public String toString() {
return "FileMetadata[" + key + ", " + length + ", " + lastModified + "]";
}
}
| 1,696 | 27.283333 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeFileSystemStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3native;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/**
* <p>
* An abstraction for a key-based {@link File} store.
* </p>
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
interface NativeFileSystemStore {
void initialize(URI uri, Configuration conf) throws IOException;
void storeFile(String key, File file, byte[] md5Hash) throws IOException;
void storeEmptyFile(String key) throws IOException;
FileMetadata retrieveMetadata(String key) throws IOException;
InputStream retrieve(String key) throws IOException;
InputStream retrieve(String key, long byteRangeStart) throws IOException;
PartialListing list(String prefix, int maxListingLength) throws IOException;
PartialListing list(String prefix, int maxListingLength, String priorLastKey, boolean recursive)
throws IOException;
void delete(String key) throws IOException;
void copy(String srcKey, String dstKey) throws IOException;
/**
* Delete all keys with the given prefix. Used for testing.
* @throws IOException
*/
void purge(String prefix) throws IOException;
/**
* Diagnostic method to dump state to the console.
* @throws IOException
*/
void dump() throws IOException;
}
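// Within this module the interface is implemented by Jets3tNativeFileSystemStore;
// NativeS3FileSystem.createDefaultStore() additionally wraps the store in a
// RetryProxy so that storeFile() and rename() are retried on IOException and
// S3Exception.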
| 2,276 | 32.485294 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSRunner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
public class TestSLSRunner {
@Test
@SuppressWarnings("all")
public void testSimulatorRunning() throws Exception {
File tempDir = new File("target", UUID.randomUUID().toString());
final List<Throwable> exceptionList =
Collections.synchronizedList(new ArrayList<Throwable>());
Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
@Override
public void uncaughtException(Thread t, Throwable e) {
exceptionList.add(e);
}
});
// start the simulator
File slsOutputDir = new File(tempDir.getAbsolutePath() + "/slsoutput/");
String args[] = new String[]{
"-inputrumen", "src/main/data/2jobs2min-rumen-jh.json",
"-output", slsOutputDir.getAbsolutePath()};
SLSRunner.main(args);
// wait for 20 seconds before stop
int count = 20;
while (count >= 0) {
Thread.sleep(1000);
if (! exceptionList.isEmpty()) {
SLSRunner.getRunner().stop();
Assert.fail("TestSLSRunner catched exception from child thread " +
"(TaskRunner.Task): " + exceptionList.get(0).getMessage());
break;
}
count--;
}
SLSRunner.getRunner().stop();
}
}
| 2,239 | 30.549296 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/scheduler/TestTaskRunner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.scheduler;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
public class TestTaskRunner {
private TaskRunner runner;
@Before
public void setUp() {
runner = new TaskRunner();
runner.setQueueSize(5);
}
@After
public void cleanUp() {
runner.stop();
}
public static class SingleTask extends TaskRunner.Task {
public static CountDownLatch latch = new CountDownLatch(1);
public static boolean first;
public SingleTask(long startTime) {
super.init(startTime);
}
@Override
public void firstStep() {
if (first) {
Assert.fail();
}
first = true;
latch.countDown();
}
@Override
public void middleStep() {
Assert.fail();
}
@Override
public void lastStep() {
Assert.fail();
}
}
@Test
public void testSingleTask() throws Exception {
runner.start();
runner.schedule(new SingleTask(0));
SingleTask.latch.await(5000, TimeUnit.MILLISECONDS);
Assert.assertTrue(SingleTask.first);
}
public static class DualTask extends TaskRunner.Task {
public static CountDownLatch latch = new CountDownLatch(1);
public static boolean first;
public static boolean last;
public DualTask(long startTime, long endTime, long interval) {
super.init(startTime, endTime, interval);
}
@Override
public void firstStep() {
if (first) {
Assert.fail();
}
first = true;
}
@Override
public void middleStep() {
Assert.fail();
}
@Override
public void lastStep() {
if (last) {
Assert.fail();
}
last = true;
latch.countDown();
}
}
@Test
public void testDualTask() throws Exception {
runner.start();
runner.schedule(new DualTask(0, 10, 10));
DualTask.latch.await(5000, TimeUnit.MILLISECONDS);
Assert.assertTrue(DualTask.first);
Assert.assertTrue(DualTask.last);
}
public static class TriTask extends TaskRunner.Task {
public static CountDownLatch latch = new CountDownLatch(1);
public static boolean first;
public static boolean middle;
public static boolean last;
public TriTask(long startTime, long endTime, long interval) {
super.init(startTime, endTime, interval);
}
@Override
public void firstStep() {
if (first) {
Assert.fail();
}
first = true;
}
@Override
public void middleStep() {
if (middle) {
Assert.fail();
}
middle = true;
}
@Override
public void lastStep() {
if (last) {
Assert.fail();
}
last = true;
latch.countDown();
}
}
@Test
public void testTriTask() throws Exception {
runner.start();
runner.schedule(new TriTask(0, 10, 5));
TriTask.latch.await(5000, TimeUnit.MILLISECONDS);
Assert.assertTrue(TriTask.first);
Assert.assertTrue(TriTask.middle);
Assert.assertTrue(TriTask.last);
}
public static class MultiTask extends TaskRunner.Task {
public static CountDownLatch latch = new CountDownLatch(1);
public static boolean first;
public static int middle;
public static boolean last;
public MultiTask(long startTime, long endTime, long interval) {
super.init(startTime, endTime, interval);
}
@Override
public void firstStep() {
if (first) {
Assert.fail();
}
first = true;
}
@Override
public void middleStep() {
middle++;
}
@Override
public void lastStep() {
if (last) {
Assert.fail();
}
last = true;
latch.countDown();
}
}
@Test
public void testMultiTask() throws Exception {
runner.start();
runner.schedule(new MultiTask(0, 20, 5));
MultiTask.latch.await(5000, TimeUnit.MILLISECONDS);
Assert.assertTrue(MultiTask.first);
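    // Expected middleStep invocations for init(0, 20, 5):
    // (20 - 0) / 5 - 2 + 1 = 3, i.e. the intermediate ticks between firstStep
    // and lastStep.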
Assert.assertEquals((20 - 0) / 5 - 2 + 1, MultiTask.middle);
Assert.assertTrue(MultiTask.last);
}
public static class PreStartTask extends TaskRunner.Task {
public static CountDownLatch latch = new CountDownLatch(1);
public static boolean first;
public PreStartTask(long startTime) {
super.init(startTime);
}
@Override
public void firstStep() {
if (first) {
Assert.fail();
}
first = true;
latch.countDown();
}
@Override
public void middleStep() {
}
@Override
public void lastStep() {
}
}
@Test
public void testPreStartQueueing() throws Exception {
runner.schedule(new PreStartTask(210));
Thread.sleep(210);
runner.start();
long startedAt = System.currentTimeMillis();
PreStartTask.latch.await(5000, TimeUnit.MILLISECONDS);
long runAt = System.currentTimeMillis();
Assert.assertTrue(PreStartTask.first);
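    // The task's 210 ms delay is measured from start(), not from schedule(),
    // so it should not have run earlier than ~200 ms after start().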
Assert.assertTrue(runAt - startedAt >= 200);
}
}
| 5,832 | 22.520161 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/web/TestSLSWebApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.web;
import org.junit.Assert;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.yarn.sls.SLSRunner;
import org.junit.Test;
import java.io.File;
import java.text.MessageFormat;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
public class TestSLSWebApp {
@Test
public void testSimulateInfoPageHtmlTemplate() throws Exception {
String simulateInfoTemplate = FileUtils.readFileToString(
new File("src/main/html/simulate.info.html.template"));
SLSRunner.simulateInfoMap.put("Number of racks", 10);
SLSRunner.simulateInfoMap.put("Number of nodes", 100);
SLSRunner.simulateInfoMap.put("Node memory (MB)", 1024);
SLSRunner.simulateInfoMap.put("Node VCores", 1);
SLSRunner.simulateInfoMap.put("Number of applications", 100);
SLSRunner.simulateInfoMap.put("Number of tasks", 1000);
SLSRunner.simulateInfoMap.put("Average tasks per applicaion", 10);
SLSRunner.simulateInfoMap.put("Number of queues", 4);
SLSRunner.simulateInfoMap.put("Average applications per queue", 25);
SLSRunner.simulateInfoMap.put("Estimated simulate time (s)", 10000);
StringBuilder info = new StringBuilder();
for (Map.Entry<String, Object> entry :
SLSRunner.simulateInfoMap.entrySet()) {
info.append("<tr>");
info.append("<td class='td1'>" + entry.getKey() + "</td>");
info.append("<td class='td2'>" + entry.getValue() + "</td>");
info.append("</tr>");
}
String simulateInfo =
MessageFormat.format(simulateInfoTemplate, info.toString());
Assert.assertTrue("The simulate info html page should not be empty",
simulateInfo.length() > 0);
for (Map.Entry<String, Object> entry :
SLSRunner.simulateInfoMap.entrySet()) {
Assert.assertTrue("The simulate info html page should have information "
+ "of " + entry.getKey(), simulateInfo.contains("<td class='td1'>"
+ entry.getKey() + "</td><td class='td2'>"
+ entry.getValue() + "</td>"));
}
}
@Test
public void testSimulatePageHtmlTemplate() throws Exception {
String simulateTemplate = FileUtils.readFileToString(
new File("src/main/html/simulate.html.template"));
Set<String> queues = new HashSet<String>();
queues.add("sls_queue_1");
queues.add("sls_queue_2");
queues.add("sls_queue_3");
String queueInfo = "";
int i = 0;
for (String queue : queues) {
queueInfo += "legends[4][" + i + "] = 'queue" + queue
+ ".allocated.memory'";
queueInfo += "legends[5][" + i + "] = 'queue" + queue
+ ".allocated.vcores'";
i ++;
}
String simulateInfo = MessageFormat.format(simulateTemplate,
queueInfo, "s", 1000, 1000);
Assert.assertTrue("The simulate page html page should not be empty",
simulateInfo.length() > 0);
}
@Test
public void testTrackPageHtmlTemplate() throws Exception {
String trackTemplate = FileUtils.readFileToString(
new File("src/main/html/track.html.template"));
String trackedQueueInfo = "";
Set<String> trackedQueues = new HashSet<String>();
trackedQueues.add("sls_queue_1");
trackedQueues.add("sls_queue_2");
trackedQueues.add("sls_queue_3");
for(String queue : trackedQueues) {
trackedQueueInfo += "<option value='Queue " + queue + "'>"
+ queue + "</option>";
}
String trackedAppInfo = "";
Set<String> trackedApps = new HashSet<String>();
trackedApps.add("app_1");
trackedApps.add("app_2");
for(String job : trackedApps) {
trackedAppInfo += "<option value='Job " + job + "'>" + job + "</option>";
}
String trackInfo = MessageFormat.format(trackTemplate, trackedQueueInfo,
trackedAppInfo, "s", 1000, 1000);
Assert.assertTrue("The queue/app tracking html page should not be empty",
trackInfo.length() > 0);
}
}
| 4,790 | 38.270492 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/nodemanager/TestNMSimulator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.nodemanager;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.sls.conf.SLSConfiguration;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestNMSimulator {
private final int GB = 1024;
private ResourceManager rm;
private YarnConfiguration conf;
@Before
public void setup() {
conf = new YarnConfiguration();
conf.set(YarnConfiguration.RM_SCHEDULER,
"org.apache.hadoop.yarn.sls.scheduler.ResourceSchedulerWrapper");
conf.set(SLSConfiguration.RM_SCHEDULER,
"org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler");
conf.setBoolean(SLSConfiguration.METRICS_SWITCH, false);
rm = new ResourceManager();
rm.init(conf);
rm.start();
}
@Test
public void testNMSimulator() throws Exception {
// Register one node
NMSimulator node1 = new NMSimulator();
node1.init("rack1/node1", GB * 10, 10, 0, 1000, rm);
node1.middleStep();
int numClusterNodes = rm.getResourceScheduler().getNumClusterNodes();
int cumulativeSleepTime = 0;
int sleepInterval = 100;
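    // Poll until the simulated node has registered with the RM, giving up
    // after roughly five seconds.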
while(numClusterNodes != 1 && cumulativeSleepTime < 5000) {
Thread.sleep(sleepInterval);
cumulativeSleepTime = cumulativeSleepTime + sleepInterval;
numClusterNodes = rm.getResourceScheduler().getNumClusterNodes();
}
Assert.assertEquals(1, rm.getResourceScheduler().getNumClusterNodes());
Assert.assertEquals(GB * 10,
rm.getResourceScheduler().getRootQueueMetrics().getAvailableMB());
Assert.assertEquals(10,
rm.getResourceScheduler().getRootQueueMetrics()
.getAvailableVirtualCores());
// Allocate one container on node1
ContainerId cId1 = newContainerId(1, 1, 1);
Container container1 = Container.newInstance(cId1, null, null,
Resources.createResource(GB, 1), null, null);
    node1.addNewContainer(container1, 100000L);
Assert.assertTrue("Node1 should have one running container.",
node1.getRunningContainers().containsKey(cId1));
// Allocate one AM container on node1
ContainerId cId2 = newContainerId(2, 1, 1);
Container container2 = Container.newInstance(cId2, null, null,
Resources.createResource(GB, 1), null, null);
    node1.addNewContainer(container2, -1L);
Assert.assertTrue("Node1 should have one running AM container",
node1.getAMContainers().contains(cId2));
// Remove containers
node1.cleanupContainer(cId1);
Assert.assertTrue("Container1 should be removed from Node1.",
node1.getCompletedContainers().contains(cId1));
node1.cleanupContainer(cId2);
Assert.assertFalse("Container2 should be removed from Node1.",
node1.getAMContainers().contains(cId2));
}
private ContainerId newContainerId(int appId, int appAttemptId, int cId) {
return BuilderUtils.newContainerId(
BuilderUtils.newApplicationAttemptId(
BuilderUtils.newApplicationId(System.currentTimeMillis(), appId),
appAttemptId), cId);
}
@After
public void tearDown() throws Exception {
rm.stop();
}
}
| 4,277 | 37.540541 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.appmaster;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.sls.conf.SLSConfiguration;
import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
public class TestAMSimulator {
private ResourceManager rm;
private YarnConfiguration conf;
@Before
public void setup() {
conf = new YarnConfiguration();
conf.set(YarnConfiguration.RM_SCHEDULER,
"org.apache.hadoop.yarn.sls.scheduler.ResourceSchedulerWrapper");
conf.set(SLSConfiguration.RM_SCHEDULER,
"org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler");
conf.setBoolean(SLSConfiguration.METRICS_SWITCH, false);
rm = new ResourceManager();
rm.init(conf);
rm.start();
}
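  // Minimal AM stub: the RM-interaction hooks are no-ops so that application
  // registration (firstStep) and teardown (lastStep) can be tested in isolation.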
class MockAMSimulator extends AMSimulator {
@Override
protected void processResponseQueue()
throws InterruptedException, YarnException, IOException {
}
@Override
protected void sendContainerRequest()
throws YarnException, IOException, InterruptedException {
}
@Override
protected void checkStop() {
}
}
@Test
public void testAMSimulator() throws Exception {
// Register one app
MockAMSimulator app = new MockAMSimulator();
List<ContainerSimulator> containers = new ArrayList<ContainerSimulator>();
    app.init(1, 1000, containers, rm, null, 0, 1000000L, "user1", "default",
false, "app1");
app.firstStep();
Assert.assertEquals(1, rm.getRMContext().getRMApps().size());
Assert.assertNotNull(rm.getRMContext().getRMApps().get(app.appId));
// Finish this app
app.lastStep();
}
@After
public void tearDown() {
rm.stop();
}
}
| 2,821 | 31.436782 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.utils;
import org.junit.Assert;
import org.junit.Test;
public class TestSLSUtils {
@Test
public void testGetRackHostname() {
String str = "/rack1/node1";
    String[] rackHostname = SLSUtils.getRackHostName(str);
    Assert.assertEquals("rack1", rackHostname[0]);
    Assert.assertEquals("node1", rackHostname[1]);
}
}
| 1,176 | 32.628571 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/RumenToSLSConverter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.sls.utils.SLSUtils;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.ObjectWriter;
@Private
@Unstable
public class RumenToSLSConverter {
private static final String EOL = System.getProperty("line.separator");
private static long baseline = 0;
private static Map<String, Set<String>> rackNodeMap =
new TreeMap<String, Set<String>>();
public static void main(String args[]) throws Exception {
Options options = new Options();
options.addOption("input", true, "input rumen json file");
options.addOption("outputJobs", true, "output jobs file");
options.addOption("outputNodes", true, "output nodes file");
CommandLineParser parser = new GnuParser();
CommandLine cmd = parser.parse(options, args);
if (! cmd.hasOption("input") ||
! cmd.hasOption("outputJobs") ||
! cmd.hasOption("outputNodes")) {
System.err.println();
System.err.println("ERROR: Missing input or output file");
System.err.println();
System.err.println("LoadGenerator creates a SLS script " +
"from a Hadoop Rumen output");
System.err.println();
System.err.println("Options: -input FILE -outputJobs FILE " +
"-outputNodes FILE");
System.err.println();
System.exit(1);
}
String inputFile = cmd.getOptionValue("input");
String outputJsonFile = cmd.getOptionValue("outputJobs");
String outputNodeFile = cmd.getOptionValue("outputNodes");
// check existing
if (! new File(inputFile).exists()) {
System.err.println();
System.err.println("ERROR: input does not exist");
System.exit(1);
}
if (new File(outputJsonFile).exists()) {
System.err.println();
System.err.println("ERROR: output job file is existing");
System.exit(1);
}
if (new File(outputNodeFile).exists()) {
System.err.println();
System.err.println("ERROR: output node file is existing");
System.exit(1);
}
File jsonFile = new File(outputJsonFile);
if (! jsonFile.getParentFile().exists()
&& ! jsonFile.getParentFile().mkdirs()) {
System.err.println("ERROR: Cannot create output directory in path: "
+ jsonFile.getParentFile().getAbsoluteFile());
System.exit(1);
}
File nodeFile = new File(outputNodeFile);
if (! nodeFile.getParentFile().exists()
&& ! nodeFile.getParentFile().mkdirs()) {
System.err.println("ERROR: Cannot create output directory in path: "
                + nodeFile.getParentFile().getAbsoluteFile());
System.exit(1);
}
generateSLSLoadFile(inputFile, outputJsonFile);
generateSLSNodeFile(outputNodeFile);
}
private static void generateSLSLoadFile(String inputFile, String outputFile)
throws IOException {
try (Reader input =
new InputStreamReader(new FileInputStream(inputFile), "UTF-8")) {
try (Writer output =
new OutputStreamWriter(new FileOutputStream(outputFile), "UTF-8")) {
ObjectMapper mapper = new ObjectMapper();
ObjectWriter writer = mapper.writerWithDefaultPrettyPrinter();
Iterator<Map> i = mapper.readValues(
new JsonFactory().createJsonParser(input), Map.class);
while (i.hasNext()) {
Map m = i.next();
output.write(writer.writeValueAsString(createSLSJob(m)) + EOL);
}
}
}
}
@SuppressWarnings("unchecked")
private static void generateSLSNodeFile(String outputFile)
throws IOException {
try (Writer output =
new OutputStreamWriter(new FileOutputStream(outputFile), "UTF-8")) {
ObjectMapper mapper = new ObjectMapper();
ObjectWriter writer = mapper.writerWithDefaultPrettyPrinter();
for (Map.Entry<String, Set<String>> entry : rackNodeMap.entrySet()) {
Map rack = new LinkedHashMap();
rack.put("rack", entry.getKey());
List nodes = new ArrayList();
for (String name : entry.getValue()) {
Map node = new LinkedHashMap();
node.put("node", name);
nodes.add(node);
}
rack.put("nodes", nodes);
output.write(writer.writeValueAsString(rack) + EOL);
}
}
}
@SuppressWarnings("unchecked")
private static Map createSLSJob(Map rumenJob) {
Map json = new LinkedHashMap();
long jobStart = (Long) rumenJob.get("submitTime");
long jobFinish = (Long) rumenJob.get("finishTime");
String jobId = rumenJob.get("jobID").toString();
String queue = rumenJob.get("queue").toString();
String user = rumenJob.get("user").toString();
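    // The first job's submit time becomes the baseline; all timestamps in the
    // generated SLS trace are expressed relative to it.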
if (baseline == 0) {
baseline = jobStart;
}
jobStart -= baseline;
jobFinish -= baseline;
long offset = 0;
if (jobStart < 0) {
System.out.println("Warning: reset job " + jobId + " start time to 0.");
offset = -jobStart;
jobFinish = jobFinish - jobStart;
jobStart = 0;
}
json.put("am.type", "mapreduce");
json.put("job.start.ms", jobStart);
json.put("job.end.ms", jobFinish);
json.put("job.queue.name", queue);
json.put("job.id", jobId);
json.put("job.user", user);
List maps = createSLSTasks("map",
(List) rumenJob.get("mapTasks"), offset);
List reduces = createSLSTasks("reduce",
(List) rumenJob.get("reduceTasks"), offset);
List tasks = new ArrayList();
tasks.addAll(maps);
tasks.addAll(reduces);
json.put("job.tasks", tasks);
return json;
}
@SuppressWarnings("unchecked")
private static List createSLSTasks(String taskType,
List rumenTasks, long offset) {
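    // Tag reduce containers with priority 10 and map containers with 20,
    // mirroring the request priorities the MapReduce AM uses.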
int priority = taskType.equals("reduce") ? 10 : 20;
List array = new ArrayList();
for (Object e : rumenTasks) {
Map rumenTask = (Map) e;
for (Object ee : (List) rumenTask.get("attempts")) {
Map rumenAttempt = (Map) ee;
long taskStart = (Long) rumenAttempt.get("startTime");
long taskFinish = (Long) rumenAttempt.get("finishTime");
String hostname = (String) rumenAttempt.get("hostName");
taskStart = taskStart - baseline + offset;
taskFinish = taskFinish - baseline + offset;
Map task = new LinkedHashMap();
task.put("container.host", hostname);
task.put("container.start.ms", taskStart);
task.put("container.end.ms", taskFinish);
task.put("container.priority", priority);
task.put("container.type", taskType);
array.add(task);
String rackHost[] = SLSUtils.getRackHostName(hostname);
if (rackNodeMap.containsKey(rackHost[0])) {
rackNodeMap.get(rackHost[0]).add(rackHost[1]);
} else {
Set<String> hosts = new TreeSet<String>();
hosts.add(rackHost[1]);
rackNodeMap.put(rackHost[0], hosts);
}
}
}
return array;
}
}
| 8,558 | 35.576923 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.rumen.JobTraceReader;
import org.apache.hadoop.tools.rumen.LoggedJob;
import org.apache.hadoop.tools.rumen.LoggedTask;
import org.apache.hadoop.tools.rumen.LoggedTaskAttempt;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.sls.appmaster.AMSimulator;
import org.apache.hadoop.yarn.sls.conf.SLSConfiguration;
import org.apache.hadoop.yarn.sls.nodemanager.NMSimulator;
import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator;
import org.apache.hadoop.yarn.sls.scheduler.ResourceSchedulerWrapper;
import org.apache.hadoop.yarn.sls.scheduler.TaskRunner;
import org.apache.hadoop.yarn.sls.utils.SLSUtils;
import org.apache.log4j.Logger;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.map.ObjectMapper;
@Private
@Unstable
public class SLSRunner {
// RM, Runner
private ResourceManager rm;
private static TaskRunner runner = new TaskRunner();
private String[] inputTraces;
private Configuration conf;
private Map<String, Integer> queueAppNumMap;
// NM simulator
private HashMap<NodeId, NMSimulator> nmMap;
private int nmMemoryMB, nmVCores;
private String nodeFile;
// AM simulator
private int AM_ID;
private Map<String, AMSimulator> amMap;
private Set<String> trackedApps;
private Map<String, Class> amClassMap;
private static int remainingApps = 0;
// metrics
private String metricsOutputDir;
private boolean printSimulation;
// other simulation information
private int numNMs, numRacks, numAMs, numTasks;
private long maxRuntime;
public final static Map<String, Object> simulateInfoMap =
new HashMap<String, Object>();
// logger
public final static Logger LOG = Logger.getLogger(SLSRunner.class);
// input traces, input-rumen or input-sls
private boolean isSLS;
public SLSRunner(boolean isSLS, String inputTraces[], String nodeFile,
String outputDir, Set<String> trackedApps,
boolean printsimulation)
throws IOException, ClassNotFoundException {
this.isSLS = isSLS;
this.inputTraces = inputTraces.clone();
this.nodeFile = nodeFile;
this.trackedApps = trackedApps;
this.printSimulation = printsimulation;
metricsOutputDir = outputDir;
nmMap = new HashMap<NodeId, NMSimulator>();
queueAppNumMap = new HashMap<String, Integer>();
amMap = new HashMap<String, AMSimulator>();
amClassMap = new HashMap<String, Class>();
// runner configuration
conf = new Configuration(false);
conf.addResource("sls-runner.xml");
// runner
int poolSize = conf.getInt(SLSConfiguration.RUNNER_POOL_SIZE,
SLSConfiguration.RUNNER_POOL_SIZE_DEFAULT);
SLSRunner.runner.setQueueSize(poolSize);
// <AMType, Class> map
for (Map.Entry e : conf) {
String key = e.getKey().toString();
if (key.startsWith(SLSConfiguration.AM_TYPE)) {
String amType = key.substring(SLSConfiguration.AM_TYPE.length());
amClassMap.put(amType, Class.forName(conf.get(key)));
}
}
}
public void start() throws Exception {
// start resource manager
startRM();
// start node managers
startNM();
// start application masters
startAM();
// set queue & tracked apps information
((ResourceSchedulerWrapper) rm.getResourceScheduler())
.setQueueSet(this.queueAppNumMap.keySet());
((ResourceSchedulerWrapper) rm.getResourceScheduler())
.setTrackedAppSet(this.trackedApps);
// print out simulation info
printSimulationInfo();
// blocked until all nodes RUNNING
waitForNodesRunning();
// starting the runner once everything is ready to go,
runner.start();
}
private void startRM() throws IOException, ClassNotFoundException {
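    // Remember the configured scheduler under the SLS key, then install
    // ResourceSchedulerWrapper as the RM scheduler so it can wrap and
    // instrument the real one.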
Configuration rmConf = new YarnConfiguration();
String schedulerClass = rmConf.get(YarnConfiguration.RM_SCHEDULER);
rmConf.set(SLSConfiguration.RM_SCHEDULER, schedulerClass);
rmConf.set(YarnConfiguration.RM_SCHEDULER,
ResourceSchedulerWrapper.class.getName());
rmConf.set(SLSConfiguration.METRICS_OUTPUT_DIR, metricsOutputDir);
rm = new ResourceManager();
rm.init(rmConf);
rm.start();
}
private void startNM() throws YarnException, IOException {
// nm configuration
nmMemoryMB = conf.getInt(SLSConfiguration.NM_MEMORY_MB,
SLSConfiguration.NM_MEMORY_MB_DEFAULT);
nmVCores = conf.getInt(SLSConfiguration.NM_VCORES,
SLSConfiguration.NM_VCORES_DEFAULT);
int heartbeatInterval = conf.getInt(
SLSConfiguration.NM_HEARTBEAT_INTERVAL_MS,
SLSConfiguration.NM_HEARTBEAT_INTERVAL_MS_DEFAULT);
// nm information (fetch from topology file, or from sls/rumen json file)
Set<String> nodeSet = new HashSet<String>();
if (nodeFile.isEmpty()) {
if (isSLS) {
for (String inputTrace : inputTraces) {
nodeSet.addAll(SLSUtils.parseNodesFromSLSTrace(inputTrace));
}
} else {
for (String inputTrace : inputTraces) {
nodeSet.addAll(SLSUtils.parseNodesFromRumenTrace(inputTrace));
}
}
} else {
nodeSet.addAll(SLSUtils.parseNodesFromNodeFile(nodeFile));
}
// create NM simulators
Random random = new Random();
Set<String> rackSet = new HashSet<String>();
for (String hostName : nodeSet) {
// we randomize the heartbeat start time from zero to 1 interval
NMSimulator nm = new NMSimulator();
nm.init(hostName, nmMemoryMB, nmVCores,
random.nextInt(heartbeatInterval), heartbeatInterval, rm);
nmMap.put(nm.getNode().getNodeID(), nm);
runner.schedule(nm);
rackSet.add(nm.getNode().getRackName());
}
numRacks = rackSet.size();
numNMs = nmMap.size();
}
private void waitForNodesRunning() throws InterruptedException {
long startTimeMS = System.currentTimeMillis();
while (true) {
int numRunningNodes = 0;
for (RMNode node : rm.getRMContext().getRMNodes().values()) {
if (node.getState() == NodeState.RUNNING) {
numRunningNodes ++;
}
}
if (numRunningNodes == numNMs) {
break;
}
LOG.info(MessageFormat.format("SLSRunner is waiting for all " +
"nodes RUNNING. {0} of {1} NMs initialized.",
numRunningNodes, numNMs));
Thread.sleep(1000);
}
LOG.info(MessageFormat.format("SLSRunner takes {0} ms to launch all nodes.",
(System.currentTimeMillis() - startTimeMS)));
}
@SuppressWarnings("unchecked")
private void startAM() throws YarnException, IOException {
// application/container configuration
int heartbeatInterval = conf.getInt(
SLSConfiguration.AM_HEARTBEAT_INTERVAL_MS,
SLSConfiguration.AM_HEARTBEAT_INTERVAL_MS_DEFAULT);
int containerMemoryMB = conf.getInt(SLSConfiguration.CONTAINER_MEMORY_MB,
SLSConfiguration.CONTAINER_MEMORY_MB_DEFAULT);
int containerVCores = conf.getInt(SLSConfiguration.CONTAINER_VCORES,
SLSConfiguration.CONTAINER_VCORES_DEFAULT);
Resource containerResource =
BuilderUtils.newResource(containerMemoryMB, containerVCores);
// application workload
if (isSLS) {
startAMFromSLSTraces(containerResource, heartbeatInterval);
} else {
startAMFromRumenTraces(containerResource, heartbeatInterval);
}
numAMs = amMap.size();
remainingApps = numAMs;
}
/**
   * Parse workload information from SLS trace files.
*/
@SuppressWarnings("unchecked")
private void startAMFromSLSTraces(Resource containerResource,
int heartbeatInterval) throws IOException {
// parse from sls traces
JsonFactory jsonF = new JsonFactory();
ObjectMapper mapper = new ObjectMapper();
for (String inputTrace : inputTraces) {
Reader input =
new InputStreamReader(new FileInputStream(inputTrace), "UTF-8");
try {
Iterator<Map> i = mapper.readValues(jsonF.createJsonParser(input),
Map.class);
while (i.hasNext()) {
Map jsonJob = i.next();
// load job information
long jobStartTime = Long.parseLong(
jsonJob.get("job.start.ms").toString());
long jobFinishTime = Long.parseLong(
jsonJob.get("job.end.ms").toString());
String user = (String) jsonJob.get("job.user");
          if (user == null) {
            user = "default";
          }
String queue = jsonJob.get("job.queue.name").toString();
String oldAppId = jsonJob.get("job.id").toString();
boolean isTracked = trackedApps.contains(oldAppId);
int queueSize = queueAppNumMap.containsKey(queue) ?
queueAppNumMap.get(queue) : 0;
queueSize ++;
queueAppNumMap.put(queue, queueSize);
// tasks
List tasks = (List) jsonJob.get("job.tasks");
if (tasks == null || tasks.size() == 0) {
continue;
}
List<ContainerSimulator> containerList =
new ArrayList<ContainerSimulator>();
for (Object o : tasks) {
Map jsonTask = (Map) o;
String hostname = jsonTask.get("container.host").toString();
long taskStart = Long.parseLong(
jsonTask.get("container.start.ms").toString());
long taskFinish = Long.parseLong(
jsonTask.get("container.end.ms").toString());
long lifeTime = taskFinish - taskStart;
int priority = Integer.parseInt(
jsonTask.get("container.priority").toString());
String type = jsonTask.get("container.type").toString();
containerList.add(new ContainerSimulator(containerResource,
lifeTime, hostname, priority, type));
}
// create a new AM
String amType = jsonJob.get("am.type").toString();
AMSimulator amSim = (AMSimulator) ReflectionUtils.newInstance(
amClassMap.get(amType), new Configuration());
if (amSim != null) {
amSim.init(AM_ID++, heartbeatInterval, containerList, rm,
this, jobStartTime, jobFinishTime, user, queue,
isTracked, oldAppId);
runner.schedule(amSim);
maxRuntime = Math.max(maxRuntime, jobFinishTime);
numTasks += containerList.size();
amMap.put(oldAppId, amSim);
}
}
} finally {
input.close();
}
}
}
/**
   * Parse workload information from Rumen trace files.
*/
@SuppressWarnings("unchecked")
private void startAMFromRumenTraces(Resource containerResource,
int heartbeatInterval)
throws IOException {
Configuration conf = new Configuration();
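    // Read Rumen traces from the local filesystem, regardless of the
    // cluster's configured default filesystem.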
conf.set("fs.defaultFS", "file:///");
long baselineTimeMS = 0;
for (String inputTrace : inputTraces) {
File fin = new File(inputTrace);
JobTraceReader reader = new JobTraceReader(
new Path(fin.getAbsolutePath()), conf);
try {
LoggedJob job = null;
while ((job = reader.getNext()) != null) {
// only support MapReduce currently
String jobType = "mapreduce";
String user = job.getUser() == null ?
"default" : job.getUser().getValue();
String jobQueue = job.getQueue().getValue();
String oldJobId = job.getJobID().toString();
long jobStartTimeMS = job.getSubmitTime();
long jobFinishTimeMS = job.getFinishTime();
if (baselineTimeMS == 0) {
baselineTimeMS = jobStartTimeMS;
}
jobStartTimeMS -= baselineTimeMS;
jobFinishTimeMS -= baselineTimeMS;
if (jobStartTimeMS < 0) {
LOG.warn("Warning: reset job " + oldJobId + " start time to 0.");
jobFinishTimeMS = jobFinishTimeMS - jobStartTimeMS;
jobStartTimeMS = 0;
}
boolean isTracked = trackedApps.contains(oldJobId);
int queueSize = queueAppNumMap.containsKey(jobQueue) ?
queueAppNumMap.get(jobQueue) : 0;
queueSize ++;
queueAppNumMap.put(jobQueue, queueSize);
List<ContainerSimulator> containerList =
new ArrayList<ContainerSimulator>();
// map tasks
for(LoggedTask mapTask : job.getMapTasks()) {
LoggedTaskAttempt taskAttempt = mapTask.getAttempts()
.get(mapTask.getAttempts().size() - 1);
String hostname = taskAttempt.getHostName().getValue();
long containerLifeTime = taskAttempt.getFinishTime()
- taskAttempt.getStartTime();
containerList.add(new ContainerSimulator(containerResource,
containerLifeTime, hostname, 10, "map"));
}
// reduce tasks
for(LoggedTask reduceTask : job.getReduceTasks()) {
LoggedTaskAttempt taskAttempt = reduceTask.getAttempts()
.get(reduceTask.getAttempts().size() - 1);
String hostname = taskAttempt.getHostName().getValue();
long containerLifeTime = taskAttempt.getFinishTime()
- taskAttempt.getStartTime();
containerList.add(new ContainerSimulator(containerResource,
containerLifeTime, hostname, 20, "reduce"));
}
// create a new AM
AMSimulator amSim = (AMSimulator) ReflectionUtils.newInstance(
amClassMap.get(jobType), conf);
if (amSim != null) {
amSim.init(AM_ID ++, heartbeatInterval, containerList,
rm, this, jobStartTimeMS, jobFinishTimeMS, user, jobQueue,
isTracked, oldJobId);
runner.schedule(amSim);
maxRuntime = Math.max(maxRuntime, jobFinishTimeMS);
numTasks += containerList.size();
amMap.put(oldJobId, amSim);
}
}
} finally {
reader.close();
}
}
}
private void printSimulationInfo() {
if (printSimulation) {
// node
LOG.info("------------------------------------");
LOG.info(MessageFormat.format("# nodes = {0}, # racks = {1}, capacity " +
"of each node {2} MB memory and {3} vcores.",
numNMs, numRacks, nmMemoryMB, nmVCores));
LOG.info("------------------------------------");
// job
LOG.info(MessageFormat.format("# applications = {0}, # total " +
"tasks = {1}, average # tasks per application = {2}",
numAMs, numTasks, (int)(Math.ceil((numTasks + 0.0) / numAMs))));
LOG.info("JobId\tQueue\tAMType\tDuration\t#Tasks");
for (Map.Entry<String, AMSimulator> entry : amMap.entrySet()) {
AMSimulator am = entry.getValue();
LOG.info(entry.getKey() + "\t" + am.getQueue() + "\t" + am.getAMType()
+ "\t" + am.getDuration() + "\t" + am.getNumTasks());
}
LOG.info("------------------------------------");
// queue
LOG.info(MessageFormat.format("number of queues = {0} average " +
"number of apps = {1}", queueAppNumMap.size(),
(int)(Math.ceil((numAMs + 0.0) / queueAppNumMap.size()))));
LOG.info("------------------------------------");
// runtime
LOG.info(MessageFormat.format("estimated simulation time is {0}" +
" seconds", (long)(Math.ceil(maxRuntime / 1000.0))));
LOG.info("------------------------------------");
}
// package these information in the simulateInfoMap used by other places
simulateInfoMap.put("Number of racks", numRacks);
simulateInfoMap.put("Number of nodes", numNMs);
simulateInfoMap.put("Node memory (MB)", nmMemoryMB);
simulateInfoMap.put("Node VCores", nmVCores);
simulateInfoMap.put("Number of applications", numAMs);
simulateInfoMap.put("Number of tasks", numTasks);
simulateInfoMap.put("Average tasks per applicaion",
(int)(Math.ceil((numTasks + 0.0) / numAMs)));
simulateInfoMap.put("Number of queues", queueAppNumMap.size());
simulateInfoMap.put("Average applications per queue",
(int)(Math.ceil((numAMs + 0.0) / queueAppNumMap.size())));
simulateInfoMap.put("Estimated simulate time (s)",
(long)(Math.ceil(maxRuntime / 1000.0)));
}
public HashMap<NodeId, NMSimulator> getNmMap() {
return nmMap;
}
public static TaskRunner getRunner() {
return runner;
}
public static void decreaseRemainingApps() {
remainingApps --;
if (remainingApps == 0) {
LOG.info("SLSRunner tears down.");
System.exit(0);
}
}
public static void main(String args[]) throws Exception {
Options options = new Options();
options.addOption("inputrumen", true, "input rumen files");
options.addOption("inputsls", true, "input sls files");
options.addOption("nodes", true, "input topology");
options.addOption("output", true, "output directory");
options.addOption("trackjobs", true,
"jobs to be tracked during simulating");
options.addOption("printsimulation", false,
"print out simulation information");
CommandLineParser parser = new GnuParser();
CommandLine cmd = parser.parse(options, args);
String inputRumen = cmd.getOptionValue("inputrumen");
String inputSLS = cmd.getOptionValue("inputsls");
String output = cmd.getOptionValue("output");
if ((inputRumen == null && inputSLS == null) || output == null) {
System.err.println();
System.err.println("ERROR: Missing input or output file");
System.err.println();
System.err.println("Options: -inputrumen|-inputsls FILE,FILE... " +
"-output FILE [-nodes FILE] [-trackjobs JobId,JobId...] " +
"[-printsimulation]");
System.err.println();
System.exit(1);
}
File outputFile = new File(output);
if (! outputFile.exists()
&& ! outputFile.mkdirs()) {
System.err.println("ERROR: Cannot create output directory "
+ outputFile.getAbsolutePath());
System.exit(1);
}
Set<String> trackedJobSet = new HashSet<String>();
if (cmd.hasOption("trackjobs")) {
String trackjobs = cmd.getOptionValue("trackjobs");
String jobIds[] = trackjobs.split(",");
trackedJobSet.addAll(Arrays.asList(jobIds));
}
String nodeFile = cmd.hasOption("nodes") ? cmd.getOptionValue("nodes") : "";
boolean isSLS = inputSLS != null;
String inputFiles[] = isSLS ? inputSLS.split(",") : inputRumen.split(",");
SLSRunner sls = new SLSRunner(isSLS, inputFiles, nodeFile, output,
trackedJobSet, cmd.hasOption("printsimulation"));
sls.start();
}
}
| 21,039 | 38.548872 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ContainerSimulator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.scheduler;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
@Private
@Unstable
public class ContainerSimulator implements Delayed {
// id
private ContainerId id;
// resource allocated
private Resource resource;
// end time
private long endTime;
// life time (ms)
private long lifeTime;
// host name
private String hostname;
// priority
private int priority;
// type
private String type;
/**
   * Invoked when the AM schedules a container to be allocated.
*/
public ContainerSimulator(Resource resource, long lifeTime,
String hostname, int priority, String type) {
this.resource = resource;
this.lifeTime = lifeTime;
this.hostname = hostname;
this.priority = priority;
this.type = type;
}
/**
   * Invoked when the NM schedules a container to run.
*/
public ContainerSimulator(ContainerId id, Resource resource, long endTime,
long lifeTime) {
this.id = id;
this.resource = resource;
this.endTime = endTime;
this.lifeTime = lifeTime;
}
public Resource getResource() {
return resource;
}
public ContainerId getId() {
return id;
}
@Override
public int compareTo(Delayed o) {
if (!(o instanceof ContainerSimulator)) {
throw new IllegalArgumentException(
"Parameter must be a ContainerSimulator instance");
}
ContainerSimulator other = (ContainerSimulator) o;
return (int) Math.signum(endTime - other.endTime);
}
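  // Delayed contract: a DelayQueue holding this simulator releases it once the
  // container's absolute end time has passed.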
@Override
public long getDelay(TimeUnit unit) {
return unit.convert(endTime - System.currentTimeMillis(),
TimeUnit.MILLISECONDS);
}
public long getLifeTime() {
return lifeTime;
}
public String getHostname() {
return hostname;
}
public long getEndTime() {
return endTime;
}
public int getPriority() {
return priority;
}
public String getType() {
return type;
}
public void setPriority(int p) {
priority = p;
}
}
| 3,065 | 24.983051 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/TaskRunner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.scheduler;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.Queue;
import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.exceptions.YarnException;
@Private
@Unstable
public class TaskRunner {
@Private
@Unstable
public abstract static class Task implements Runnable, Delayed {
private long start;
private long end;
private long nextRun;
private long startTime;
private long endTime;
private long repeatInterval;
private Queue<Task> queue;
public Task(){}
//values in milliseconds, start/end are milliseconds from now
public void init(long startTime, long endTime, long repeatInterval) {
if (endTime - startTime < 0) {
throw new IllegalArgumentException(MessageFormat.format(
"endTime[{0}] cannot be smaller than startTime[{1}]", endTime,
startTime));
}
      if (repeatInterval < 1) {
throw new IllegalArgumentException(MessageFormat.format(
"repeatInterval[{0}] cannot be less than 1", repeatInterval));
}
if ((endTime - startTime) % repeatInterval != 0) {
throw new IllegalArgumentException(MessageFormat.format(
"Invalid parameters: (endTime[{0}] - startTime[{1}]) " +
"% repeatInterval[{2}] != 0",
endTime, startTime, repeatInterval));
}
start = startTime;
end = endTime;
this.repeatInterval = repeatInterval;
}
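    // Convert the relative start/end offsets into absolute wall-clock times,
    // anchored at the moment the runner (re)schedules this task.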
private void timeRebase(long now) {
startTime = now + start;
endTime = now + end;
this.nextRun = startTime;
}
//values in milliseconds, start is milliseconds from now
//it only executes firstStep()
public void init(long startTime) {
init(startTime, startTime, 1);
}
private void setQueue(Queue<Task> queue) {
this.queue = queue;
}
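    // Each run() executes exactly one step: firstStep on the first tick,
    // middleStep on interior ticks, lastStep on the final tick. After the
    // first and middle steps the task re-enqueues itself for its next tick.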
@Override
public final void run() {
try {
if (nextRun == startTime) {
firstStep();
nextRun += repeatInterval;
if (nextRun <= endTime) {
queue.add(this);
}
} else if (nextRun < endTime) {
middleStep();
nextRun += repeatInterval;
queue.add(this);
} else {
lastStep();
}
} catch (Exception e) {
e.printStackTrace();
Thread.getDefaultUncaughtExceptionHandler()
.uncaughtException(Thread.currentThread(), e);
}
}
@Override
public long getDelay(TimeUnit unit) {
return unit.convert(nextRun - System.currentTimeMillis(),
TimeUnit.MILLISECONDS);
}
@Override
public int compareTo(Delayed o) {
if (!(o instanceof Task)) {
throw new IllegalArgumentException("Parameter must be a Task instance");
}
Task other = (Task) o;
return (int) Math.signum(nextRun - other.nextRun);
}
public abstract void firstStep() throws Exception;
public abstract void middleStep() throws Exception;
public abstract void lastStep() throws Exception;
public void setEndTime(long et) {
endTime = et;
}
}
private DelayQueue queue;
private int threadPoolSize;
private ThreadPoolExecutor executor;
private long startTimeMS = 0;
public TaskRunner() {
queue = new DelayQueue();
}
public void setQueueSize(int threadPoolSize) {
this.threadPoolSize = threadPoolSize;
}
@SuppressWarnings("unchecked")
public void start() {
if (executor != null) {
throw new IllegalStateException("Already started");
}
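    // Tasks scheduled before start() were parked in the original delay queue;
    // hand the executor a fresh queue, then re-schedule the parked tasks so
    // their delays are measured from the actual start time.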
DelayQueue preStartQueue = queue;
queue = new DelayQueue();
executor = new ThreadPoolExecutor(threadPoolSize, threadPoolSize, 0,
TimeUnit.MILLISECONDS, queue);
executor.prestartAllCoreThreads();
startTimeMS = System.currentTimeMillis();
for (Object d : preStartQueue) {
schedule((Task) d, startTimeMS);
}
}
public void stop() {
executor.shutdownNow();
}
@SuppressWarnings("unchecked")
private void schedule(Task task, long timeNow) {
task.timeRebase(timeNow);
task.setQueue(queue);
queue.add(task);
}
public void schedule(Task task) {
schedule(task, System.currentTimeMillis());
}
public long getStartTimeMS() {
return this.startTimeMS;
}
}
| 5,404 | 28.05914 | 80 |
java
|