repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/RunnableCallable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.lang;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.lib.util.Check;
import java.util.concurrent.Callable;
/**
* Adapter class that allows <code>Runnable</code>s and <code>Callable</code>s to
* be treated as the other.
*/
@InterfaceAudience.Private
public class RunnableCallable implements Callable<Void>, Runnable {
private Runnable runnable;
private Callable<?> callable;
/**
* Constructor that takes a runnable.
*
* @param runnable runnable.
*/
public RunnableCallable(Runnable runnable) {
this.runnable = Check.notNull(runnable, "runnable");
}
/**
* Constructor that takes a callable.
*
* @param callable callable.
*/
public RunnableCallable(Callable<?> callable) {
this.callable = Check.notNull(callable, "callable");
}
/**
* Invokes the wrapped callable/runnable as a callable.
*
* @return void
*
* @throws Exception thrown by the wrapped callable/runnable invocation.
*/
@Override
public Void call() throws Exception {
if (runnable != null) {
runnable.run();
} else {
callable.call();
}
return null;
}
/**
* Invokes the wrapped callable/runnable as a runnable.
*
* @throws RuntimeException thrown by the wrapped callable/runnable invocation.
*/
@Override
public void run() {
if (runnable != null) {
runnable.run();
} else {
try {
callable.call();
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
}
/**
* Returns the class name of the wrapped callable/runnable.
*
* @return the class name of the wrapped callable/runnable.
*/
@Override
public String toString() {
return (runnable != null) ? runnable.getClass().getSimpleName() : callable.getClass().getSimpleName();
}
}
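// Illustrative usage sketch (not part of the original file; the executor and the
// printed message are hypothetical): the same RunnableCallable instance can be
// handed to APIs expecting either a Callable<Void> or a Runnable.
class RunnableCallableExample {
  public static void main(String[] args) throws Exception {
    java.util.concurrent.ExecutorService executor =
        java.util.concurrent.Executors.newSingleThreadExecutor();
    RunnableCallable task = new RunnableCallable(new Runnable() {
      @Override
      public void run() {
        System.out.println("doing work");
      }
    });
    // Submitted as a Callable<Void>; the cast disambiguates the submit() overload.
    executor.submit((java.util.concurrent.Callable<Void>) task).get();
    // Invoked directly as a Runnable; a checked exception from a wrapped Callable
    // would surface here as a RuntimeException.
    task.run();
    executor.shutdown();
  }
}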
| 2,672 | 26.27551 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Groups.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service;
import org.apache.hadoop.classification.InterfaceAudience;
import java.io.IOException;
import java.util.List;
@InterfaceAudience.Private
public interface Groups {
public List<String> getGroups(String user) throws IOException;
}
| 1,082 | 32.84375 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccessException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.lib.lang.XException;
@InterfaceAudience.Private
public class FileSystemAccessException extends XException {
public enum ERROR implements XException.ERROR {
H01("Service property [{0}] not defined"),
H02("Kerberos initialization failed, {0}"),
H03("FileSystemExecutor error, {0}"),
H04("Invalid configuration, it has not be created by the FileSystemAccessService"),
H05("[{0}] validation failed, {1}"),
H06("Property [{0}] not defined in configuration object"),
H07("[{0}] not healthy, {1}"),
H08("{0}"),
H09("Invalid FileSystemAccess security mode [{0}]"),
H10("Hadoop config directory not found [{0}]"),
H11("Could not load Hadoop config files, {0}");
private String template;
ERROR(String template) {
this.template = template;
}
@Override
public String getTemplate() {
return template;
}
}
public FileSystemAccessException(ERROR error, Object... params) {
super(error, params);
}
}
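// Illustrative sketch (not part of the original file; the property name is
// hypothetical): the ERROR templates are MessageFormat patterns, so the varargs
// passed to the constructor fill the {0}, {1}, ... placeholders of the chosen
// template when the exception message is rendered.
class FileSystemAccessExceptionExample {
  static void requireProperty(String value, String name) throws FileSystemAccessException {
    if (value == null) {
      // Renders a message along the lines of
      // "H01: Service property [authentication.type] not defined".
      throw new FileSystemAccessException(FileSystemAccessException.ERROR.H01, name);
    }
  }
}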
| 1,915 | 32.614035 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Instrumentation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service;
import org.apache.hadoop.classification.InterfaceAudience;
import java.util.Map;
@InterfaceAudience.Private
public interface Instrumentation {
public interface Cron {
public Cron start();
public Cron stop();
}
public interface Variable<T> {
T getValue();
}
public Cron createCron();
public void incr(String group, String name, long count);
public void addCron(String group, String name, Cron cron);
public void addVariable(String group, String name, Variable<?> variable);
//sampling happens once a second
public void addSampler(String group, String name, int samplingSize, Variable<Long> variable);
public Map<String, Map<String, ?>> getSnapshot();
}
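// Illustrative sketch (not part of the original file; the group/name strings are
// hypothetical): how a service typically publishes metrics through this
// interface, mirroring what FileSystemAccessService does in postInit().
class InstrumentationUsageExample {
  static void register(Instrumentation instrumentation) throws Exception {
    // Counter: bumped every time something interesting happens.
    instrumentation.incr("example", "requests", 1);
    // Variable: polled lazily whenever a snapshot is taken.
    instrumentation.addVariable("example", "free.memory", new Instrumentation.Variable<Long>() {
      @Override
      public Long getValue() {
        return Runtime.getRuntime().freeMemory();
      }
    });
    // Cron: measures the duration of a unit of work.
    Instrumentation.Cron cron = instrumentation.createCron().start();
    Thread.sleep(10); // the timed work
    instrumentation.addCron("example", "work", cron.stop());
  }
}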
| 1,548 | 27.685185 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Scheduler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service;
import org.apache.hadoop.classification.InterfaceAudience;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
@InterfaceAudience.Private
public interface Scheduler {
public abstract void schedule(Callable<?> callable, long delay, long interval, TimeUnit unit);
public abstract void schedule(Runnable runnable, long delay, long interval, TimeUnit unit);
}
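// Illustrative sketch (not part of the original file; the delay/interval values
// are hypothetical): scheduling a periodic task, similar to how
// FileSystemAccessService schedules its filesystem cache purger.
class SchedulerUsageExample {
  static void start(Scheduler scheduler) {
    scheduler.schedule(new Runnable() {
      @Override
      public void run() {
        // periodic work, e.g. purging idle cache entries
      }
    }, 10, 60, TimeUnit.SECONDS); // first run after 10 seconds, then every 60 seconds
  }
}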
| 1,237 | 35.411765 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccess.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import java.io.IOException;
@InterfaceAudience.Private
public interface FileSystemAccess {
public interface FileSystemExecutor<T> {
public T execute(FileSystem fs) throws IOException;
}
public <T> T execute(String user, Configuration conf, FileSystemExecutor<T> executor) throws
FileSystemAccessException;
public FileSystem createFileSystem(String user, Configuration conf) throws IOException, FileSystemAccessException;
public void releaseFileSystem(FileSystem fs) throws IOException;
public Configuration getFileSystemConfiguration();
}
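// Illustrative sketch (not part of the original file; the user name and path are
// hypothetical): the executor pattern this interface is built around. The
// Configuration must be the one returned by getFileSystemConfiguration().
class FileSystemAccessUsageExample {
  static boolean exists(FileSystemAccess access, String user, final String path)
      throws FileSystemAccessException {
    Configuration conf = access.getFileSystemConfiguration();
    return access.execute(user, conf, new FileSystemAccess.FileSystemExecutor<Boolean>() {
      @Override
      public Boolean execute(FileSystem fs) throws IOException {
        return fs.exists(new org.apache.hadoop.fs.Path(path));
      }
    });
  }
}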
| 1,560 | 33.688889 | 116 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.hadoop;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.lib.server.BaseService;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.service.FileSystemAccessException;
import org.apache.hadoop.lib.service.Instrumentation;
import org.apache.hadoop.lib.service.Scheduler;
import org.apache.hadoop.lib.util.Check;
import org.apache.hadoop.lib.util.ConfigurationUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.VersionInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
@InterfaceAudience.Private
public class FileSystemAccessService extends BaseService implements FileSystemAccess {
private static final Logger LOG = LoggerFactory.getLogger(FileSystemAccessService.class);
public static final String PREFIX = "hadoop";
private static final String INSTRUMENTATION_GROUP = "hadoop";
public static final String AUTHENTICATION_TYPE = "authentication.type";
public static final String KERBEROS_KEYTAB = "authentication.kerberos.keytab";
public static final String KERBEROS_PRINCIPAL = "authentication.kerberos.principal";
public static final String FS_CACHE_PURGE_FREQUENCY = "filesystem.cache.purge.frequency";
public static final String FS_CACHE_PURGE_TIMEOUT = "filesystem.cache.purge.timeout";
public static final String NAME_NODE_WHITELIST = "name.node.whitelist";
public static final String HADOOP_CONF_DIR = "config.dir";
private static final String[] HADOOP_CONF_FILES = {"core-site.xml", "hdfs-site.xml"};
private static final String FILE_SYSTEM_SERVICE_CREATED = "FileSystemAccessService.created";
private static class CachedFileSystem {
private FileSystem fs;
private long lastUse;
private long timeout;
private int count;
public CachedFileSystem(long timeout) {
this.timeout = timeout;
lastUse = -1;
count = 0;
}
synchronized FileSystem getFileSytem(Configuration conf)
throws IOException {
if (fs == null) {
fs = FileSystem.get(conf);
}
lastUse = -1;
count++;
return fs;
}
synchronized void release() throws IOException {
count--;
if (count == 0) {
if (timeout == 0) {
fs.close();
fs = null;
lastUse = -1;
}
else {
lastUse = System.currentTimeMillis();
}
}
}
// To avoid race conditions while adding/removing entries from the cache map,
// an entry in the cache remains forever; it just closes/opens filesystems
// based on their utilization. Worst case scenario, the penalty we'll
// pay is that the number of entries in the cache will be the total
// number of users in HDFS (which seems a reasonable overhead).
synchronized boolean purgeIfIdle() throws IOException {
boolean ret = false;
if (count == 0 && lastUse != -1 &&
(System.currentTimeMillis() - lastUse) > timeout) {
fs.close();
fs = null;
lastUse = -1;
ret = true;
}
return ret;
}
}
public FileSystemAccessService() {
super(PREFIX);
}
private Collection<String> nameNodeWhitelist;
Configuration serviceHadoopConf;
private AtomicInteger unmanagedFileSystems = new AtomicInteger();
private ConcurrentHashMap<String, CachedFileSystem> fsCache =
new ConcurrentHashMap<String, CachedFileSystem>();
private long purgeTimeout;
@Override
protected void init() throws ServiceException {
LOG.info("Using FileSystemAccess JARs version [{}]", VersionInfo.getVersion());
String security = getServiceConfig().get(AUTHENTICATION_TYPE, "simple").trim();
if (security.equals("kerberos")) {
String defaultName = getServer().getName();
String keytab = System.getProperty("user.home") + "/" + defaultName + ".keytab";
keytab = getServiceConfig().get(KERBEROS_KEYTAB, keytab).trim();
if (keytab.length() == 0) {
throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_KEYTAB);
}
String principal = defaultName + "/localhost@LOCALHOST";
principal = getServiceConfig().get(KERBEROS_PRINCIPAL, principal).trim();
if (principal.length() == 0) {
throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_PRINCIPAL);
}
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
try {
UserGroupInformation.loginUserFromKeytab(principal, keytab);
} catch (IOException ex) {
throw new ServiceException(FileSystemAccessException.ERROR.H02, ex.getMessage(), ex);
}
LOG.info("Using FileSystemAccess Kerberos authentication, principal [{}] keytab [{}]", principal, keytab);
} else if (security.equals("simple")) {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "simple");
UserGroupInformation.setConfiguration(conf);
LOG.info("Using FileSystemAccess simple/pseudo authentication, principal [{}]", System.getProperty("user.name"));
} else {
throw new ServiceException(FileSystemAccessException.ERROR.H09, security);
}
String hadoopConfDirProp = getServiceConfig().get(HADOOP_CONF_DIR, getServer().getConfigDir());
File hadoopConfDir = new File(hadoopConfDirProp).getAbsoluteFile();
if (!hadoopConfDir.exists()) {
hadoopConfDir = new File(getServer().getConfigDir()).getAbsoluteFile();
}
if (!hadoopConfDir.exists()) {
throw new ServiceException(FileSystemAccessException.ERROR.H10, hadoopConfDir);
}
try {
serviceHadoopConf = loadHadoopConf(hadoopConfDir);
} catch (IOException ex) {
throw new ServiceException(FileSystemAccessException.ERROR.H11, ex.toString(), ex);
}
LOG.debug("FileSystemAccess FileSystem configuration:");
for (Map.Entry entry : serviceHadoopConf) {
LOG.debug(" {} = {}", entry.getKey(), entry.getValue());
}
setRequiredServiceHadoopConf(serviceHadoopConf);
nameNodeWhitelist = toLowerCase(getServiceConfig().getTrimmedStringCollection(NAME_NODE_WHITELIST));
}
private Configuration loadHadoopConf(File dir) throws IOException {
Configuration hadoopConf = new Configuration(false);
for (String file : HADOOP_CONF_FILES) {
File f = new File(dir, file);
if (f.exists()) {
hadoopConf.addResource(new Path(f.getAbsolutePath()));
}
}
return hadoopConf;
}
@Override
public void postInit() throws ServiceException {
super.postInit();
Instrumentation instrumentation = getServer().get(Instrumentation.class);
instrumentation.addVariable(INSTRUMENTATION_GROUP, "unmanaged.fs", new Instrumentation.Variable<Integer>() {
@Override
public Integer getValue() {
return unmanagedFileSystems.get();
}
});
instrumentation.addSampler(INSTRUMENTATION_GROUP, "unmanaged.fs", 60, new Instrumentation.Variable<Long>() {
@Override
public Long getValue() {
return (long) unmanagedFileSystems.get();
}
});
Scheduler scheduler = getServer().get(Scheduler.class);
int purgeInterval = getServiceConfig().getInt(FS_CACHE_PURGE_FREQUENCY, 60);
purgeTimeout = getServiceConfig().getLong(FS_CACHE_PURGE_TIMEOUT, 60);
purgeTimeout = (purgeTimeout > 0) ? purgeTimeout : 0;
if (purgeTimeout > 0) {
scheduler.schedule(new FileSystemCachePurger(),
purgeInterval, purgeInterval, TimeUnit.SECONDS);
}
}
private class FileSystemCachePurger implements Runnable {
@Override
public void run() {
int count = 0;
for (CachedFileSystem cacheFs : fsCache.values()) {
try {
count += cacheFs.purgeIfIdle() ? 1 : 0;
} catch (Throwable ex) {
LOG.warn("Error while purging filesystem, " + ex.toString(), ex);
}
}
LOG.debug("Purged [{}} filesystem instances", count);
}
}
private Set<String> toLowerCase(Collection<String> collection) {
Set<String> set = new HashSet<String>();
for (String value : collection) {
set.add(StringUtils.toLowerCase(value));
}
return set;
}
@Override
public Class getInterface() {
return FileSystemAccess.class;
}
@Override
public Class[] getServiceDependencies() {
return new Class[]{Instrumentation.class, Scheduler.class};
}
protected UserGroupInformation getUGI(String user) throws IOException {
return UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser());
}
protected void setRequiredServiceHadoopConf(Configuration conf) {
conf.set("fs.hdfs.impl.disable.cache", "true");
}
private static final String HTTPFS_FS_USER = "httpfs.fs.user";
protected FileSystem createFileSystem(Configuration namenodeConf)
throws IOException {
String user = UserGroupInformation.getCurrentUser().getShortUserName();
CachedFileSystem newCachedFS = new CachedFileSystem(purgeTimeout);
CachedFileSystem cachedFS = fsCache.putIfAbsent(user, newCachedFS);
if (cachedFS == null) {
cachedFS = newCachedFS;
}
Configuration conf = new Configuration(namenodeConf);
conf.set(HTTPFS_FS_USER, user);
return cachedFS.getFileSytem(conf);
}
protected void closeFileSystem(FileSystem fs) throws IOException {
if (fsCache.containsKey(fs.getConf().get(HTTPFS_FS_USER))) {
fsCache.get(fs.getConf().get(HTTPFS_FS_USER)).release();
}
}
protected void validateNamenode(String namenode) throws FileSystemAccessException {
if (nameNodeWhitelist.size() > 0 && !nameNodeWhitelist.contains("*")) {
if (!nameNodeWhitelist.contains(
StringUtils.toLowerCase(namenode))) {
throw new FileSystemAccessException(FileSystemAccessException.ERROR.H05, namenode, "not in whitelist");
}
}
}
protected void checkNameNodeHealth(FileSystem fileSystem) throws FileSystemAccessException {
}
@Override
public <T> T execute(String user, final Configuration conf, final FileSystemExecutor<T> executor)
throws FileSystemAccessException {
Check.notEmpty(user, "user");
Check.notNull(conf, "conf");
Check.notNull(executor, "executor");
if (!conf.getBoolean(FILE_SYSTEM_SERVICE_CREATED, false)) {
throw new FileSystemAccessException(FileSystemAccessException.ERROR.H04);
}
if (conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) == null ||
conf.getTrimmed(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY).length() == 0) {
throw new FileSystemAccessException(FileSystemAccessException.ERROR.H06,
CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
}
try {
validateNamenode(
new URI(conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)).
getAuthority());
UserGroupInformation ugi = getUGI(user);
return ugi.doAs(new PrivilegedExceptionAction<T>() {
@Override
public T run() throws Exception {
FileSystem fs = createFileSystem(conf);
Instrumentation instrumentation = getServer().get(Instrumentation.class);
Instrumentation.Cron cron = instrumentation.createCron();
try {
checkNameNodeHealth(fs);
cron.start();
return executor.execute(fs);
} finally {
cron.stop();
instrumentation.addCron(INSTRUMENTATION_GROUP, executor.getClass().getSimpleName(), cron);
closeFileSystem(fs);
}
}
});
} catch (FileSystemAccessException ex) {
throw ex;
} catch (Exception ex) {
throw new FileSystemAccessException(FileSystemAccessException.ERROR.H03, ex);
}
}
public FileSystem createFileSystemInternal(String user, final Configuration conf)
throws IOException, FileSystemAccessException {
Check.notEmpty(user, "user");
Check.notNull(conf, "conf");
if (!conf.getBoolean(FILE_SYSTEM_SERVICE_CREATED, false)) {
throw new FileSystemAccessException(FileSystemAccessException.ERROR.H04);
}
try {
validateNamenode(
new URI(conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)).getAuthority());
UserGroupInformation ugi = getUGI(user);
return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return createFileSystem(conf);
}
});
} catch (IOException ex) {
throw ex;
} catch (FileSystemAccessException ex) {
throw ex;
} catch (Exception ex) {
throw new FileSystemAccessException(FileSystemAccessException.ERROR.H08, ex.getMessage(), ex);
}
}
@Override
public FileSystem createFileSystem(String user, final Configuration conf) throws IOException,
FileSystemAccessException {
unmanagedFileSystems.incrementAndGet();
return createFileSystemInternal(user, conf);
}
@Override
public void releaseFileSystem(FileSystem fs) throws IOException {
unmanagedFileSystems.decrementAndGet();
closeFileSystem(fs);
}
@Override
public Configuration getFileSystemConfiguration() {
Configuration conf = new Configuration(true);
ConfigurationUtils.copy(serviceHadoopConf, conf);
conf.setBoolean(FILE_SYSTEM_SERVICE_CREATED, true);
// Force-clear server-side umask to make HttpFS match WebHDFS behavior
conf.set(FsPermission.UMASK_LABEL, "000");
return conf;
}
}
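// Illustrative sketch (not part of the original file; the user name is
// hypothetical): the intended lifecycle of an "unmanaged" FileSystem handed out
// by createFileSystem(), which must be paired with releaseFileSystem() so the
// per-user cache entry can eventually be purged.
class UnmanagedFileSystemExample {
  static void listRoot(FileSystemAccess access) throws Exception {
    Configuration conf = access.getFileSystemConfiguration();
    FileSystem fs = access.createFileSystem("alice", conf);
    try {
      fs.listStatus(new Path("/"));
    } finally {
      access.releaseFileSystem(fs); // decrements the usage count tracked by the cache
    }
  }
}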
| 15,097 | 35.914425 | 119 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/scheduler/SchedulerService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.scheduler;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.lib.lang.RunnableCallable;
import org.apache.hadoop.lib.server.BaseService;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.Instrumentation;
import org.apache.hadoop.lib.service.Scheduler;
import org.apache.hadoop.lib.util.Check;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.text.MessageFormat;
import java.util.concurrent.Callable;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
@InterfaceAudience.Private
public class SchedulerService extends BaseService implements Scheduler {
private static final Logger LOG = LoggerFactory.getLogger(SchedulerService.class);
private static final String INST_GROUP = "scheduler";
public static final String PREFIX = "scheduler";
public static final String CONF_THREADS = "threads";
private ScheduledExecutorService scheduler;
public SchedulerService() {
super(PREFIX);
}
@Override
public void init() throws ServiceException {
int threads = getServiceConfig().getInt(CONF_THREADS, 5);
scheduler = new ScheduledThreadPoolExecutor(threads);
LOG.debug("Scheduler started");
}
@Override
public void destroy() {
try {
long limit = Time.now() + 30 * 1000;
scheduler.shutdownNow();
while (!scheduler.awaitTermination(1000, TimeUnit.MILLISECONDS)) {
LOG.debug("Waiting for scheduler to shutdown");
if (Time.now() > limit) {
LOG.warn("Gave up waiting for scheduler to shutdown");
break;
}
}
if (scheduler.isTerminated()) {
LOG.debug("Scheduler shutdown");
}
} catch (InterruptedException ex) {
LOG.warn(ex.getMessage(), ex);
}
}
@Override
public Class[] getServiceDependencies() {
return new Class[]{Instrumentation.class};
}
@Override
public Class getInterface() {
return Scheduler.class;
}
@Override
public void schedule(final Callable<?> callable, long delay, long interval, TimeUnit unit) {
Check.notNull(callable, "callable");
if (!scheduler.isShutdown()) {
LOG.debug("Scheduling callable [{}], interval [{}] seconds, delay [{}] in [{}]",
new Object[]{callable, delay, interval, unit});
Runnable r = new Runnable() {
@Override
public void run() {
String instrName = callable.getClass().getSimpleName();
Instrumentation instr = getServer().get(Instrumentation.class);
if (getServer().getStatus() == Server.Status.HALTED) {
LOG.debug("Skipping [{}], server status [{}]", callable, getServer().getStatus());
instr.incr(INST_GROUP, instrName + ".skips", 1);
} else {
LOG.debug("Executing [{}]", callable);
instr.incr(INST_GROUP, instrName + ".execs", 1);
Instrumentation.Cron cron = instr.createCron().start();
try {
callable.call();
} catch (Exception ex) {
instr.incr(INST_GROUP, instrName + ".fails", 1);
LOG.error("Error executing [{}], {}", new Object[]{callable, ex.getMessage(), ex});
} finally {
instr.addCron(INST_GROUP, instrName, cron.stop());
}
}
}
};
scheduler.scheduleWithFixedDelay(r, delay, interval, unit);
} else {
throw new IllegalStateException(
MessageFormat.format("Scheduler shutting down, ignoring scheduling of [{0}]", callable));
}
}
@Override
public void schedule(Runnable runnable, long delay, long interval, TimeUnit unit) {
schedule((Callable<?>) new RunnableCallable(runnable), delay, interval, unit);
}
}
| 4,761 | 34.537313 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/GroupsService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.security;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.BaseService;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.Groups;
import org.apache.hadoop.lib.util.ConfigurationUtils;
import java.io.IOException;
import java.util.List;
@InterfaceAudience.Private
public class GroupsService extends BaseService implements Groups {
private static final String PREFIX = "groups";
private org.apache.hadoop.security.Groups hGroups;
public GroupsService() {
super(PREFIX);
}
@Override
protected void init() throws ServiceException {
Configuration hConf = new Configuration(false);
ConfigurationUtils.copy(getServiceConfig(), hConf);
hGroups = new org.apache.hadoop.security.Groups(hConf);
}
@Override
public Class getInterface() {
return Groups.class;
}
@Override
public List<String> getGroups(String user) throws IOException {
return hGroups.getGroups(user);
}
}
| 1,895 | 31.135593 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/instrumentation/InstrumentationService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.instrumentation;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.lib.server.BaseService;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.Instrumentation;
import org.apache.hadoop.lib.service.Scheduler;
import org.apache.hadoop.util.Time;
import org.json.simple.JSONAware;
import org.json.simple.JSONObject;
import org.json.simple.JSONStreamAware;
import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
@InterfaceAudience.Private
public class InstrumentationService extends BaseService implements Instrumentation {
public static final String PREFIX = "instrumentation";
public static final String CONF_TIMERS_SIZE = "timers.size";
private int timersSize;
private Lock counterLock;
private Lock timerLock;
private Lock variableLock;
private Lock samplerLock;
private Map<String, Map<String, AtomicLong>> counters;
private Map<String, Map<String, Timer>> timers;
private Map<String, Map<String, VariableHolder>> variables;
private Map<String, Map<String, Sampler>> samplers;
private List<Sampler> samplersList;
private Map<String, Map<String, ?>> all;
public InstrumentationService() {
super(PREFIX);
}
@Override
@SuppressWarnings("unchecked")
public void init() throws ServiceException {
timersSize = getServiceConfig().getInt(CONF_TIMERS_SIZE, 10);
counterLock = new ReentrantLock();
timerLock = new ReentrantLock();
variableLock = new ReentrantLock();
samplerLock = new ReentrantLock();
Map<String, VariableHolder> jvmVariables = new ConcurrentHashMap<String, VariableHolder>();
counters = new ConcurrentHashMap<String, Map<String, AtomicLong>>();
timers = new ConcurrentHashMap<String, Map<String, Timer>>();
variables = new ConcurrentHashMap<String, Map<String, VariableHolder>>();
samplers = new ConcurrentHashMap<String, Map<String, Sampler>>();
samplersList = new ArrayList<Sampler>();
all = new LinkedHashMap<String, Map<String, ?>>();
all.put("os-env", System.getenv());
all.put("sys-props", (Map<String, ?>) (Map) System.getProperties());
all.put("jvm", jvmVariables);
all.put("counters", (Map) counters);
all.put("timers", (Map) timers);
all.put("variables", (Map) variables);
all.put("samplers", (Map) samplers);
jvmVariables.put("free.memory", new VariableHolder<Long>(new Instrumentation.Variable<Long>() {
@Override
public Long getValue() {
return Runtime.getRuntime().freeMemory();
}
}));
jvmVariables.put("max.memory", new VariableHolder<Long>(new Instrumentation.Variable<Long>() {
@Override
public Long getValue() {
return Runtime.getRuntime().maxMemory();
}
}));
jvmVariables.put("total.memory", new VariableHolder<Long>(new Instrumentation.Variable<Long>() {
@Override
public Long getValue() {
return Runtime.getRuntime().totalMemory();
}
}));
}
@Override
public void postInit() throws ServiceException {
Scheduler scheduler = getServer().get(Scheduler.class);
if (scheduler != null) {
scheduler.schedule(new SamplersRunnable(), 0, 1, TimeUnit.SECONDS);
}
}
@Override
public Class getInterface() {
return Instrumentation.class;
}
@SuppressWarnings("unchecked")
private <T> T getToAdd(String group, String name, Class<T> klass, Lock lock, Map<String, Map<String, T>> map) {
boolean locked = false;
try {
Map<String, T> groupMap = map.get(group);
if (groupMap == null) {
lock.lock();
locked = true;
groupMap = map.get(group);
if (groupMap == null) {
groupMap = new ConcurrentHashMap<String, T>();
map.put(group, groupMap);
}
}
T element = groupMap.get(name);
if (element == null) {
if (!locked) {
lock.lock();
locked = true;
}
element = groupMap.get(name);
if (element == null) {
try {
if (klass == Timer.class) {
element = (T) new Timer(timersSize);
} else {
element = klass.newInstance();
}
} catch (Exception ex) {
throw new RuntimeException(ex);
}
groupMap.put(name, element);
}
}
return element;
} finally {
if (locked) {
lock.unlock();
}
}
}
static class Cron implements Instrumentation.Cron {
long start;
long lapStart;
long own;
long total;
@Override
public Cron start() {
if (total != 0) {
throw new IllegalStateException("Cron already used");
}
if (start == 0) {
start = Time.now();
lapStart = start;
} else if (lapStart == 0) {
lapStart = Time.now();
}
return this;
}
@Override
public Cron stop() {
if (total != 0) {
throw new IllegalStateException("Cron already used");
}
if (lapStart > 0) {
own += Time.now() - lapStart;
lapStart = 0;
}
return this;
}
void end() {
stop();
total = Time.now() - start;
}
}
static class Timer implements JSONAware, JSONStreamAware {
static final int LAST_TOTAL = 0;
static final int LAST_OWN = 1;
static final int AVG_TOTAL = 2;
static final int AVG_OWN = 3;
Lock lock = new ReentrantLock();
private long[] own;
private long[] total;
private int last;
private boolean full;
private int size;
public Timer(int size) {
this.size = size;
own = new long[size];
total = new long[size];
for (int i = 0; i < size; i++) {
own[i] = -1;
total[i] = -1;
}
last = -1;
}
long[] getValues() {
lock.lock();
try {
long[] values = new long[4];
values[LAST_TOTAL] = total[last];
values[LAST_OWN] = own[last];
int limit = (full) ? size : (last + 1);
for (int i = 0; i < limit; i++) {
values[AVG_TOTAL] += total[i];
values[AVG_OWN] += own[i];
}
values[AVG_TOTAL] = values[AVG_TOTAL] / limit;
values[AVG_OWN] = values[AVG_OWN] / limit;
return values;
} finally {
lock.unlock();
}
}
void addCron(Cron cron) {
cron.end();
lock.lock();
try {
last = (last + 1) % size;
full = full || last == (size - 1);
total[last] = cron.total;
own[last] = cron.own;
} finally {
lock.unlock();
}
}
@SuppressWarnings("unchecked")
private JSONObject getJSON() {
long[] values = getValues();
JSONObject json = new JSONObject();
json.put("lastTotal", values[0]);
json.put("lastOwn", values[1]);
json.put("avgTotal", values[2]);
json.put("avgOwn", values[3]);
return json;
}
@Override
public String toJSONString() {
return getJSON().toJSONString();
}
@Override
public void writeJSONString(Writer out) throws IOException {
getJSON().writeJSONString(out);
}
}
@Override
public Cron createCron() {
return new Cron();
}
@Override
public void incr(String group, String name, long count) {
AtomicLong counter = getToAdd(group, name, AtomicLong.class, counterLock, counters);
counter.addAndGet(count);
}
@Override
public void addCron(String group, String name, Instrumentation.Cron cron) {
Timer timer = getToAdd(group, name, Timer.class, timerLock, timers);
timer.addCron((Cron) cron);
}
static class VariableHolder<E> implements JSONAware, JSONStreamAware {
Variable<E> var;
public VariableHolder() {
}
public VariableHolder(Variable<E> var) {
this.var = var;
}
@SuppressWarnings("unchecked")
private JSONObject getJSON() {
JSONObject json = new JSONObject();
json.put("value", var.getValue());
return json;
}
@Override
public String toJSONString() {
return getJSON().toJSONString();
}
@Override
public void writeJSONString(Writer out) throws IOException {
out.write(toJSONString());
}
}
@Override
public void addVariable(String group, String name, Variable<?> variable) {
VariableHolder holder = getToAdd(group, name, VariableHolder.class, variableLock, variables);
holder.var = variable;
}
static class Sampler implements JSONAware, JSONStreamAware {
Variable<Long> variable;
long[] values;
private AtomicLong sum;
private int last;
private boolean full;
void init(int size, Variable<Long> variable) {
this.variable = variable;
values = new long[size];
sum = new AtomicLong();
last = 0;
}
void sample() {
int index = last;
long valueGoingOut = values[last];
full = full || last == (values.length - 1);
last = (last + 1) % values.length;
values[index] = variable.getValue();
sum.addAndGet(-valueGoingOut + values[index]);
}
double getRate() {
return ((double) sum.get()) / ((full) ? values.length : ((last == 0) ? 1 : last));
}
@SuppressWarnings("unchecked")
private JSONObject getJSON() {
JSONObject json = new JSONObject();
json.put("sampler", getRate());
json.put("size", (full) ? values.length : last);
return json;
}
@Override
public String toJSONString() {
return getJSON().toJSONString();
}
@Override
public void writeJSONString(Writer out) throws IOException {
out.write(toJSONString());
}
}
@Override
public void addSampler(String group, String name, int samplingSize, Variable<Long> variable) {
Sampler sampler = getToAdd(group, name, Sampler.class, samplerLock, samplers);
samplerLock.lock();
try {
sampler.init(samplingSize, variable);
samplersList.add(sampler);
} finally {
samplerLock.unlock();
}
}
class SamplersRunnable implements Runnable {
@Override
public void run() {
samplerLock.lock();
try {
for (Sampler sampler : samplersList) {
sampler.sample();
}
} finally {
samplerLock.unlock();
}
}
}
@Override
public Map<String, Map<String, ?>> getSnapshot() {
return all;
}
}
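// Illustrative sketch (not part of the original file; timings are hypothetical):
// how the package-private Timer keeps a rolling window of Cron measurements.
// Each completed Cron overwrites the oldest slot; getValues() then reports the
// last and the windowed average of the "total" and "own" times.
class TimerRollingWindowExample {
  static void demo() throws InterruptedException {
    InstrumentationService.Timer timer = new InstrumentationService.Timer(10);
    for (int i = 0; i < 3; i++) {
      InstrumentationService.Cron cron = new InstrumentationService.Cron().start();
      Thread.sleep(5); // the timed work
      timer.addCron(cron.stop());
    }
    long[] values = timer.getValues();
    System.out.println("last total=" + values[InstrumentationService.Timer.LAST_TOTAL]
        + " avg own=" + values[InstrumentationService.Timer.AVG_OWN]);
  }
}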
| 11,522 | 26.968447 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/Check.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.util;
import org.apache.hadoop.classification.InterfaceAudience;
import java.text.MessageFormat;
import java.util.List;
import java.util.regex.Pattern;
/**
* Utility methods to check preconditions.
* <p>
* Commonly used for method arguments preconditions.
*/
@InterfaceAudience.Private
public class Check {
/**
* Verifies a variable is not NULL.
*
* @param obj the variable to check.
* @param name the name to use in the exception message.
*
* @return the variable.
*
* @throws IllegalArgumentException if the variable is NULL.
*/
public static <T> T notNull(T obj, String name) {
if (obj == null) {
throw new IllegalArgumentException(name + " cannot be null");
}
return obj;
}
/**
* Verifies a list does not have any NULL elements.
*
* @param list the list to check.
* @param name the name to use in the exception message.
*
* @return the list.
*
* @throws IllegalArgumentException if the list has NULL elements.
*/
public static <T> List<T> notNullElements(List<T> list, String name) {
notNull(list, name);
for (int i = 0; i < list.size(); i++) {
notNull(list.get(i), MessageFormat.format("list [{0}] element [{1}]", name, i));
}
return list;
}
/**
* Verifies a string is not NULL and not empty.
*
* @param str the variable to check.
* @param name the name to use in the exception message.
*
* @return the variable.
*
* @throws IllegalArgumentException if the variable is NULL or empty.
*/
public static String notEmpty(String str, String name) {
if (str == null) {
throw new IllegalArgumentException(name + " cannot be null");
}
if (str.length() == 0) {
throw new IllegalArgumentException(name + " cannot be empty");
}
return str;
}
/**
* Verifies a string list is not NULL and has no NULL or empty elements.
*
* @param list the list to check.
* @param name the name to use in the exception message.
*
* @return the list.
*
* @throws IllegalArgumentException if the string list has NULL or empty
* elements.
*/
public static List<String> notEmptyElements(List<String> list, String name) {
notNull(list, name);
for (int i = 0; i < list.size(); i++) {
notEmpty(list.get(i), MessageFormat.format("list [{0}] element [{1}]", name, i));
}
return list;
}
private static final String IDENTIFIER_PATTERN_STR = "[a-zA-Z_][a-zA-Z0-9_\\-]*";
private static final Pattern IDENTIFIER_PATTERN = Pattern.compile("^" + IDENTIFIER_PATTERN_STR + "$");
/**
* Verifies a value is a valid identifier,
* <code>[a-zA-Z_][a-zA-Z0-9_\-]*</code>, up to a maximum length.
*
* @param value string to check if it is a valid identifier.
* @param maxLen maximum length.
* @param name the name to use in the exception message.
*
* @return the value.
*
* @throws IllegalArgumentException if the string is not a valid identifier.
*/
public static String validIdentifier(String value, int maxLen, String name) {
Check.notEmpty(value, name);
if (value.length() > maxLen) {
throw new IllegalArgumentException(
MessageFormat.format("[{0}] = [{1}] exceeds max len [{2}]", name, value, maxLen));
}
if (!IDENTIFIER_PATTERN.matcher(value).find()) {
throw new IllegalArgumentException(
MessageFormat.format("[{0}] = [{1}] must be '{2}'", name, value, IDENTIFIER_PATTERN_STR));
}
return value;
}
/**
* Verifies an integer is greater than zero.
*
* @param value integer value.
* @param name the name to use in the exception message.
*
* @return the value.
*
* @throws IllegalArgumentException if the integer is zero or less.
*/
public static int gt0(int value, String name) {
return (int) gt0((long) value, name);
}
/**
* Verifies a long is greater than zero.
*
* @param value long value.
* @param name the name to use in the exception message.
*
* @return the value.
*
* @throws IllegalArgumentException if the long is zero or less.
*/
public static long gt0(long value, String name) {
if (value <= 0) {
throw new IllegalArgumentException(
MessageFormat.format("parameter [{0}] = [{1}] must be greater than zero", name, value));
}
return value;
}
/**
* Verifies an integer is greater than or equal to zero.
*
* @param value integer value.
* @param name the name to use in the exception message.
*
* @return the value.
*
* @throws IllegalArgumentException if the integer is less than zero.
*/
public static int ge0(int value, String name) {
return (int) ge0((long) value, name);
}
/**
* Verifies a long is greater than or equal to zero.
*
* @param value long value.
* @param name the name to use in the exception message.
*
* @return the value.
*
* @throws IllegalArgumentException if the long is less than zero.
*/
public static long ge0(long value, String name) {
if (value < 0) {
throw new IllegalArgumentException(MessageFormat.format(
"parameter [{0}] = [{1}] must be greater than or equals zero", name, value));
}
return value;
}
}
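// Illustrative sketch (not part of the original file; the field and parameter
// names are hypothetical): typical use of Check for argument preconditions at
// the top of a constructor, failing fast with an IllegalArgumentException.
class CheckUsageExample {
  private final String id;
  private final int retries;
  CheckUsageExample(String id, int retries) {
    // Check methods return their argument, so they can be used inline.
    this.id = Check.validIdentifier(id, 32, "id");
    this.retries = Check.gt0(retries, "retries");
  }
}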
| 6,066 | 28.8867 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/ConfigurationUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.w3c.dom.DOMException;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.w3c.dom.Text;
import org.xml.sax.SAXException;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import java.io.IOException;
import java.io.InputStream;
import java.util.Map;
/**
* Configuration utilities.
*/
@InterfaceAudience.Private
public abstract class ConfigurationUtils {
/**
* Copies configuration key/value pairs from one configuration to another; if a property already exists in the target, it gets
* replaced.
*
* @param source source configuration.
* @param target target configuration.
*/
public static void copy(Configuration source, Configuration target) {
Check.notNull(source, "source");
Check.notNull(target, "target");
for (Map.Entry<String, String> entry : source) {
target.set(entry.getKey(), entry.getValue());
}
}
/**
* Injects configuration key/value pairs from one configuration to another if the key does not exist in the target
* configuration.
*
* @param source source configuration.
* @param target target configuration.
*/
public static void injectDefaults(Configuration source, Configuration target) {
Check.notNull(source, "source");
Check.notNull(target, "target");
for (Map.Entry<String, String> entry : source) {
if (target.get(entry.getKey()) == null) {
target.set(entry.getKey(), entry.getValue());
}
}
}
/**
* Returns a new Configuration with all inline values resolved.
*
* @param conf configuration to resolve.
*
* @return a new Configuration with all inline values resolved.
*/
public static Configuration resolve(Configuration conf) {
Configuration resolved = new Configuration(false);
for (Map.Entry<String, String> entry : conf) {
resolved.set(entry.getKey(), conf.get(entry.getKey()));
}
return resolved;
}
// Cannibalized from FileSystemAccess <code>Configuration.loadResource()</code>.
/**
* Create a configuration from an InputStream.
* <p>
* Cannibalized from <code>Configuration.loadResource()</code>.
*
* @param conf configuration to set the loaded properties on.
* @param is inputstream to read the configuration from.
*
* @throws IOException thrown if the configuration could not be read.
*/
public static void load(Configuration conf, InputStream is) throws IOException {
try {
DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance();
// ignore all comments inside the xml file
docBuilderFactory.setIgnoringComments(true);
DocumentBuilder builder = docBuilderFactory.newDocumentBuilder();
Document doc = builder.parse(is);
parseDocument(conf, doc);
} catch (SAXException e) {
throw new IOException(e);
} catch (ParserConfigurationException e) {
throw new IOException(e);
}
}
// Cannibalized from FileSystemAccess <code>Configuration.loadResource()</code>.
private static void parseDocument(Configuration conf, Document doc) throws IOException {
try {
Element root = doc.getDocumentElement();
if (!"configuration".equals(root.getTagName())) {
throw new IOException("bad conf file: top-level element not <configuration>");
}
NodeList props = root.getChildNodes();
for (int i = 0; i < props.getLength(); i++) {
Node propNode = props.item(i);
if (!(propNode instanceof Element)) {
continue;
}
Element prop = (Element) propNode;
if (!"property".equals(prop.getTagName())) {
throw new IOException("bad conf file: element not <property>");
}
NodeList fields = prop.getChildNodes();
String attr = null;
String value = null;
for (int j = 0; j < fields.getLength(); j++) {
Node fieldNode = fields.item(j);
if (!(fieldNode instanceof Element)) {
continue;
}
Element field = (Element) fieldNode;
if ("name".equals(field.getTagName()) && field.hasChildNodes()) {
attr = ((Text) field.getFirstChild()).getData().trim();
}
if ("value".equals(field.getTagName()) && field.hasChildNodes()) {
value = ((Text) field.getFirstChild()).getData();
}
}
if (attr != null && value != null) {
conf.set(attr, value);
}
}
} catch (DOMException e) {
throw new IOException(e);
}
}
}
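// Illustrative sketch (not part of the original file; the stream source is
// hypothetical): loading a Hadoop-style XML configuration from an InputStream
// and layering it as defaults under an existing Configuration.
class ConfigurationUtilsExample {
  static Configuration loadDefaults(Configuration target, InputStream is) throws IOException {
    Configuration defaults = new Configuration(false);
    ConfigurationUtils.load(defaults, is);               // parse <configuration><property>... XML
    ConfigurationUtils.injectDefaults(defaults, target); // only keys missing in target are set
    return target;
  }
}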
| 5,520 | 33.50625 | 117 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.servlet;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.server.ServerException;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.text.MessageFormat;
/**
* {@link Server} subclass that implements <code>ServletContextListener</code>
* and uses its lifecycle to start and stop the server.
*/
@InterfaceAudience.Private
public abstract class ServerWebApp extends Server implements ServletContextListener {
private static final String HOME_DIR = ".home.dir";
private static final String CONFIG_DIR = ".config.dir";
private static final String LOG_DIR = ".log.dir";
private static final String TEMP_DIR = ".temp.dir";
private static final String HTTP_HOSTNAME = ".http.hostname";
private static final String HTTP_PORT = ".http.port";
public static final String SSL_ENABLED = ".ssl.enabled";
private static final ThreadLocal<String> HOME_DIR_TL =
new ThreadLocal<String>();
private InetSocketAddress authority;
/**
* Method for testing purposes.
*/
public static void setHomeDirForCurrentThread(String homeDir) {
HOME_DIR_TL.set(homeDir);
}
/**
* Constructor for testing purposes.
*/
protected ServerWebApp(String name, String homeDir, String configDir, String logDir, String tempDir,
Configuration config) {
super(name, homeDir, configDir, logDir, tempDir, config);
}
/**
* Constructor for testing purposes.
*/
protected ServerWebApp(String name, String homeDir, Configuration config) {
super(name, homeDir, config);
}
/**
* Constructor. Subclasses must have a default constructor specifying
* the server name.
* <p>
* The server name is used to resolve the Java System properties that define
* the server home, config, log and temp directories.
* <p>
* The home directory is looked up in the Java System property
* <code>#SERVER_NAME#.home.dir</code>.
* <p>
* The config directory is looked up in the Java System property
* <code>#SERVER_NAME#.config.dir</code>; if not defined, it resolves to
* the <code>#SERVER_HOME_DIR#/conf</code> directory.
* <p>
* The log directory is looked up in the Java System property
* <code>#SERVER_NAME#.log.dir</code>; if not defined, it resolves to
* the <code>#SERVER_HOME_DIR#/log</code> directory.
* <p>
* The temp directory is looked up in the Java System property
* <code>#SERVER_NAME#.temp.dir</code>; if not defined, it resolves to
* the <code>#SERVER_HOME_DIR#/temp</code> directory.
*
* @param name server name.
*/
public ServerWebApp(String name) {
super(name, getHomeDir(name),
getDir(name, CONFIG_DIR, getHomeDir(name) + "/conf"),
getDir(name, LOG_DIR, getHomeDir(name) + "/log"),
getDir(name, TEMP_DIR, getHomeDir(name) + "/temp"), null);
}
/**
* Returns the server home directory.
* <p>
* It is looked up in the Java System property
* <code>#SERVER_NAME#.home.dir</code>.
*
* @param name the server name.
*
* @return the server home directory.
*/
static String getHomeDir(String name) {
String homeDir = HOME_DIR_TL.get();
if (homeDir == null) {
String sysProp = name + HOME_DIR;
homeDir = System.getProperty(sysProp);
if (homeDir == null) {
throw new IllegalArgumentException(MessageFormat.format("System property [{0}] not defined", sysProp));
}
}
return homeDir;
}
/**
* Convenience method that looks for a Java System property defining a
* directory and, if not present, defaults to the specified directory.
*
* @param name server name, used as prefix of the Java System property.
* @param dirType dir type, used as suffix of the Java System property.
* @param defaultDir the default directory to return if the Java System
* property <code>name + dirType</code> is not defined.
*
* @return the directory defined in the Java System property or the
* default directory if the Java System property is not defined.
*/
static String getDir(String name, String dirType, String defaultDir) {
String sysProp = name + dirType;
return System.getProperty(sysProp, defaultDir);
}
/**
* Initializes the <code>ServletContextListener</code> which initializes
* the Server.
*
* @param event servlet context event.
*/
@Override
public void contextInitialized(ServletContextEvent event) {
try {
init();
} catch (ServerException ex) {
event.getServletContext().log("ERROR: " + ex.getMessage());
throw new RuntimeException(ex);
}
}
/**
* Resolves the host and port InetSocketAddress the web server is listening to.
* <p>
* This implementation looks for the following 2 properties:
* <ul>
* <li>#SERVER_NAME#.http.hostname</li>
* <li>#SERVER_NAME#.http.port</li>
* </ul>
*
* @return the host and port InetSocketAddress the web server is listening to.
* @throws ServerException thrown if any of the above 2 properties is not defined.
*/
protected InetSocketAddress resolveAuthority() throws ServerException {
String hostnameKey = getName() + HTTP_HOSTNAME;
String portKey = getName() + HTTP_PORT;
String host = System.getProperty(hostnameKey);
String port = System.getProperty(portKey);
if (host == null) {
throw new ServerException(ServerException.ERROR.S13, hostnameKey);
}
if (port == null) {
throw new ServerException(ServerException.ERROR.S13, portKey);
}
try {
InetAddress add = InetAddress.getByName(host);
int portNum = Integer.parseInt(port);
return new InetSocketAddress(add, portNum);
} catch (UnknownHostException ex) {
throw new ServerException(ServerException.ERROR.S14, ex.toString(), ex);
}
}
/**
* Destroys the <code>ServletContextListener</code> which destroys
* the Server.
*
* @param event servlet context event.
*/
@Override
public void contextDestroyed(ServletContextEvent event) {
destroy();
}
/**
* Returns the hostname:port InetSocketAddress the webserver is listening to.
*
* @return the hostname:port InetSocketAddress the webserver is listening to.
*
* @throws ServerException thrown if the web server authority could not be resolved.
*/
public InetSocketAddress getAuthority() throws ServerException {
synchronized (this) {
if (authority == null) {
authority = resolveAuthority();
}
}
return authority;
}
/**
* Sets an alternate hostname:port InetSocketAddress to use.
* <p>
* For testing purposes.
*
* @param authority alternate authority.
*/
@VisibleForTesting
public void setAuthority(InetSocketAddress authority) {
this.authority = authority;
}
/**
* Returns if SSL is enabled for the web server.
*
* @return <code>true</code> if SSL is enabled, <code>false</code> otherwise.
*/
public boolean isSslEnabled() {
return Boolean.valueOf(System.getProperty(getName() + SSL_ENABLED, "false"));
}
}
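
// ---------------------------------------------------------------------------
// Illustrative usage sketch, not part of the original ServerWebApp.java: it
// shows how a concrete web app ties its server name to the Java System
// properties resolved above. The server name "myhttpserver", the directory
// values and the ExampleServerWebApp class are assumptions made up for this
// sketch, not part of the real codebase.
// ---------------------------------------------------------------------------
class ExampleServerWebApp extends ServerWebApp {

  // The single-argument constructor resolves myhttpserver.home.dir,
  // myhttpserver.config.dir, myhttpserver.log.dir and myhttpserver.temp.dir
  // from Java System properties (falling back to conf/log/temp under home).
  public ExampleServerWebApp() {
    super("myhttpserver");
  }

  // In a real deployment the properties below would be set by the servlet
  // container startup scripts; they are set programmatically here only to
  // keep the sketch self-contained.
  static ExampleServerWebApp createForSketch() {
    System.setProperty("myhttpserver.home.dir", "/var/lib/myhttpserver");
    System.setProperty("myhttpserver.http.hostname", "localhost");
    System.setProperty("myhttpserver.http.port", "14000");
    return new ExampleServerWebApp();
  }
}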
| 7,976 | 32.376569 | 111 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/MDCFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.servlet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.slf4j.MDC;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import java.security.Principal;
/**
* Filter that sets request contextual information for the slf4j MDC.
* <p>
* It sets the following values:
* <ul>
* <li>hostname: if the {@link HostnameFilter} is present and configured
* before this filter</li>
* <li>user: the <code>HttpServletRequest.getUserPrincipal().getName()</code></li>
* <li>method: the HTTP method of the request (GET, POST, ...)</li>
* <li>path: the path of the request URL</li>
* </ul>
*/
@InterfaceAudience.Private
public class MDCFilter implements Filter {
/**
* Initializes the filter.
* <p>
* This implementation is a NOP.
*
* @param config filter configuration.
*
* @throws ServletException thrown if the filter could not be initialized.
*/
@Override
public void init(FilterConfig config) throws ServletException {
}
/**
* Sets the slf4j <code>MDC</code> and delegates the request to the chain.
*
* @param request servlet request.
* @param response servlet response.
* @param chain filter chain.
*
* @throws IOException thrown if an IO error occurs.
* @throws ServletException thrown if a servlet error occurs.
*/
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
throws IOException, ServletException {
try {
MDC.clear();
String hostname = HostnameFilter.get();
if (hostname != null) {
MDC.put("hostname", HostnameFilter.get());
}
Principal principal = ((HttpServletRequest) request).getUserPrincipal();
String user = (principal != null) ? principal.getName() : null;
if (user != null) {
MDC.put("user", user);
}
MDC.put("method", ((HttpServletRequest) request).getMethod());
MDC.put("path", ((HttpServletRequest) request).getPathInfo());
chain.doFilter(request, response);
} finally {
MDC.clear();
}
}
/**
* Destroys the filter.
* <p>
* This implementation is a NOP.
*/
@Override
public void destroy() {
}
}
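
// ---------------------------------------------------------------------------
// Illustrative usage sketch, not part of the original MDCFilter.java: once
// the filter has populated the MDC, any code running on the same request
// thread can read the values back (and log layouts such as log4j's
// %X{hostname} can include them). The MdcUsageExample class and its log
// message are assumptions made up for this sketch.
// ---------------------------------------------------------------------------
class MdcUsageExample {
  private static final org.slf4j.Logger LOG =
    org.slf4j.LoggerFactory.getLogger(MdcUsageExample.class);

  static void logRequestContext() {
    // The keys match the ones set by MDCFilter.doFilter() above.
    String hostname = MDC.get("hostname");
    String user = MDC.get("user");
    String method = MDC.get("method");
    String path = MDC.get("path");
    LOG.info("request context: host={} user={} method={} path={}",
      new Object[]{hostname, user, method, path});
  }
}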
| 3,251 | 30.269231 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/FileSystemReleaseFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.servlet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.lib.service.FileSystemAccess;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import java.io.IOException;
/**
* The <code>FileSystemReleaseFilter</code> releases back to the
* {@link FileSystemAccess} service a <code>FileSystem</code> instance.
* <p>
* This filter is useful in situations where a servlet request
* is streaming out HDFS data and the corresponding filesystem
* instance has to be released back once the streaming completes.
*/
@InterfaceAudience.Private
public abstract class FileSystemReleaseFilter implements Filter {
private static final ThreadLocal<FileSystem> FILE_SYSTEM_TL = new ThreadLocal<FileSystem>();
/**
* Initializes the filter.
* <p>
* This implementation is a NOP.
*
* @param filterConfig filter configuration.
*
* @throws ServletException thrown if the filter could not be initialized.
*/
@Override
public void init(FilterConfig filterConfig) throws ServletException {
}
/**
* It delegates the incoming request to the <code>FilterChain</code>, and
* at its completion (in a finally block) releases the filesystem instance
* back to the {@link FileSystemAccess} service.
*
* @param servletRequest servlet request.
* @param servletResponse servlet response.
* @param filterChain filter chain.
*
* @throws IOException thrown if an IO error occurs.
* @throws ServletException thrown if a servlet error occurs.
*/
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain)
throws IOException, ServletException {
try {
filterChain.doFilter(servletRequest, servletResponse);
} finally {
FileSystem fs = FILE_SYSTEM_TL.get();
if (fs != null) {
FILE_SYSTEM_TL.remove();
getFileSystemAccess().releaseFileSystem(fs);
}
}
}
/**
* Destroys the filter.
* <p>
* This implementation is a NOP.
*/
@Override
public void destroy() {
}
/**
* Static method that sets the <code>FileSystem</code> to release back to
* the {@link FileSystemAccess} service on servlet request completion.
*
* @param fs filesystem instance.
*/
public static void setFileSystem(FileSystem fs) {
FILE_SYSTEM_TL.set(fs);
}
/**
* Abstract method to be implemented by concrete implementations of the
* filter; it returns the {@link FileSystemAccess} service to which the
* filesystem will be released.
*
* @return the FileSystemAccess service.
*/
protected abstract FileSystemAccess getFileSystemAccess();
}
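
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original FileSystemReleaseFilter.java:
// a minimal concrete filter. How the FileSystemAccess service is obtained is
// deployment specific; here it is injected through a static field purely to
// keep the sketch self-contained. The ExampleFileSystemReleaseFilter class is
// an assumption made up for this sketch.
// ---------------------------------------------------------------------------
class ExampleFileSystemReleaseFilter extends FileSystemReleaseFilter {
  private static volatile FileSystemAccess fsAccess;

  // Expected to be called once by application bootstrap code (assumption).
  static void setFileSystemAccess(FileSystemAccess access) {
    fsAccess = access;
  }

  @Override
  protected FileSystemAccess getFileSystemAccess() {
    return fsAccess;
  }
}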
| 3,700 | 31.752212 | 111 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.servlet;
import org.apache.hadoop.classification.InterfaceAudience;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Filter that resolves the requester hostname.
*/
@InterfaceAudience.Private
public class HostnameFilter implements Filter {
static final ThreadLocal<String> HOSTNAME_TL = new ThreadLocal<String>();
private static final Logger log = LoggerFactory.getLogger(HostnameFilter.class);
/**
* Initializes the filter.
* <p>
* This implementation is a NOP.
*
* @param config filter configuration.
*
* @throws ServletException thrown if the filter could not be initialized.
*/
@Override
public void init(FilterConfig config) throws ServletException {
}
/**
* Resolves the requester hostname and delegates the request to the chain.
* <p>
* The requester hostname is available via the {@link #get} method.
*
* @param request servlet request.
* @param response servlet response.
* @param chain filter chain.
*
* @throws IOException thrown if an IO error occurs.
* @throws ServletException thrown if a servlet error occurs.
*/
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
throws IOException, ServletException {
try {
String hostname;
try {
String address = request.getRemoteAddr();
if (address != null) {
hostname = InetAddress.getByName(address).getCanonicalHostName();
} else {
log.warn("Request remote address is NULL");
hostname = "???";
}
} catch (UnknownHostException ex) {
log.warn("Request remote address could not be resolved, {0}", ex.toString(), ex);
hostname = "???";
}
HOSTNAME_TL.set(hostname);
chain.doFilter(request, response);
} finally {
HOSTNAME_TL.remove();
}
}
/**
* Returns the requester hostname.
*
* @return the requester hostname.
*/
public static String get() {
return HOSTNAME_TL.get();
}
/**
* Destroys the filter.
* <p>
* This implementation is a NOP.
*/
@Override
public void destroy() {
}
}
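
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original HostnameFilter.java: code
// running on the same request thread, after this filter in the chain, can
// read the resolved requester hostname. The HostnameUsageExample class and
// the message format are assumptions made up for this sketch.
// ---------------------------------------------------------------------------
class HostnameUsageExample {
  static String describeRequester() {
    // get() returns "???" when resolution failed, or null outside a request.
    String requester = HostnameFilter.get();
    return (requester != null) ? "request from " + requester : "no request context";
  }
}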
| 3,302 | 28.756757 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/BaseService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.util.ConfigurationUtils;
import java.util.Map;
/**
* Convenience class implementing the {@link Service} interface.
*/
@InterfaceAudience.Private
public abstract class BaseService implements Service {
private String prefix;
private Server server;
private Configuration serviceConfig;
/**
* Service constructor.
*
* @param prefix service prefix.
*/
public BaseService(String prefix) {
this.prefix = prefix;
}
/**
* Initializes the service.
* <p>
* It collects all service properties (properties having the
* <code>#SERVER#.#SERVICE#.</code> prefix). The property names are then
* trimmed from the <code>#SERVER#.#SERVICE#.</code> prefix.
* <p>
* After collecting the service properties it delegates to the
* {@link #init()} method.
*
* @param server the server initializing the service, give access to the
* server context.
*
* @throws ServiceException thrown if the service could not be initialized.
*/
@Override
public final void init(Server server) throws ServiceException {
this.server = server;
String servicePrefix = getPrefixedName("");
serviceConfig = new Configuration(false);
for (Map.Entry<String, String> entry : ConfigurationUtils.resolve(server.getConfig())) {
String key = entry.getKey();
if (key.startsWith(servicePrefix)) {
serviceConfig.set(key.substring(servicePrefix.length()), entry.getValue());
}
}
init();
}
/**
* Post initializes the service. This method is called by the
* {@link Server} after all services of the server have been initialized.
* <p>
* This method does a NOP.
*
* @throws ServiceException thrown if the service could not be
* post-initialized.
*/
@Override
public void postInit() throws ServiceException {
}
/**
* Destroys the service. This method is called once, when the
* {@link Server} owning the service is being destroyed.
* <p>
* This method does a NOP.
*/
@Override
public void destroy() {
}
/**
* Returns the service dependencies of this service. The service will be
* instantiated only if all the service dependencies are already initialized.
* <p>
* This method returns an empty array (size 0)
*
* @return an empty array (size 0).
*/
@Override
public Class[] getServiceDependencies() {
return new Class[0];
}
/**
* Notification callback when the server changes its status.
* <p>
* This method does a NOP.
*
* @param oldStatus old server status.
* @param newStatus new server status.
*
* @throws ServiceException thrown if the service could not process the status change.
*/
@Override
public void serverStatusChange(Server.Status oldStatus, Server.Status newStatus) throws ServiceException {
}
/**
* Returns the service prefix.
*
* @return the service prefix.
*/
protected String getPrefix() {
return prefix;
}
/**
* Returns the server owning the service.
*
* @return the server owning the service.
*/
protected Server getServer() {
return server;
}
/**
* Returns the full prefixed name of a service property.
*
* @param name of the property.
*
* @return prefixed name of the property.
*/
protected String getPrefixedName(String name) {
return server.getPrefixedName(prefix + "." + name);
}
/**
* Returns the service configuration properties. Property
* names are trimmed off from its prefix.
* <p>
* The service configuration properties are all properties
* with names starting with <code>#SERVER#.#SERVICE#.</code>
* in the server configuration.
*
* @return the service configuration properties with names
* trimmed off from their <code>#SERVER#.#SERVICE#.</code>
* prefix.
*/
protected Configuration getServiceConfig() {
return serviceConfig;
}
/**
* Initializes the service.
* <p>
* This method is called by {@link #init(Server)} after all service properties
* (properties prefixed with <code>#SERVER#.#SERVICE#.</code>) have been
* collected into the service configuration.
*
* @throws ServiceException thrown if the service could not be initialized.
*/
protected abstract void init() throws ServiceException;
}
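
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original BaseService.java: a minimal
// concrete service. The Greeter interface, the GreeterService class, the
// "greeter" prefix and the "message" property are assumptions made up for
// this sketch; a real service would expose a meaningful API.
// ---------------------------------------------------------------------------
interface Greeter {
  String greet(String who);
}

class GreeterService extends BaseService implements Greeter {
  private String message;

  public GreeterService() {
    super("greeter"); // service properties use the #SERVER#.greeter. prefix
  }

  @Override
  protected void init() throws ServiceException {
    // getServiceConfig() holds the service properties with the prefix trimmed,
    // so #SERVER#.greeter.message is read simply as "message".
    message = getServiceConfig().get("message", "Hello");
  }

  @Override
  public Class getInterface() {
    return Greeter.class;
  }

  @Override
  public String greet(String who) {
    return message + ", " + who;
  }
}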
| 5,202 | 27.745856 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Service.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.server;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Service interface for components to be managed by the {@link Server} class.
*/
@InterfaceAudience.Private
public interface Service {
/**
* Initializes the service. This method is called once, when the
* {@link Server} owning the service is being initialized.
*
* @param server the server initializing the service, give access to the
* server context.
*
* @throws ServiceException thrown if the service could not be initialized.
*/
public void init(Server server) throws ServiceException;
/**
* Post initializes the service. This method is called by the
* {@link Server} after all services of the server have been initialized.
*
* @throws ServiceException thrown if the service could not be
* post-initialized.
*/
public void postInit() throws ServiceException;
/**
* Destroys the service. This method is called once, when the
* {@link Server} owning the service is being destroyed.
*/
public void destroy();
/**
* Returns the service dependencies of this service. The service will be
* instantiated only if all the service dependencies are already initialized.
*
* @return the service dependencies.
*/
public Class[] getServiceDependencies();
/**
* Returns the interface implemented by this service. This interface is used
* by the {@link Server} when the {@link Server#get(Class)} method is used to
* retrieve a service.
*
* @return the interface that identifies the service.
*/
public Class getInterface();
/**
* Notification callback when the server changes its status.
*
* @param oldStatus old server status.
* @param newStatus new server status.
*
* @throws ServiceException thrown if the service could not process the status change.
*/
public void serverStatusChange(Server.Status oldStatus, Server.Status newStatus) throws ServiceException;
}
| 2,799 | 32.73494 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.lib.lang.XException;
/**
* Exception thrown by the {@link Server} class.
*/
@InterfaceAudience.Private
public class ServerException extends XException {
/**
* Error codes used by the {@link Server} class.
*/
@InterfaceAudience.Private
public static enum ERROR implements XException.ERROR {
S01("Dir [{0}] does not exist"),
S02("[{0}] is not a directory"),
S03("Could not load file from classpath [{0}], {1}"),
S04("Service [{0}] does not implement declared interface [{1}]"),
S05("[{0}] is not a file"),
S06("Could not load file [{0}], {1}"),
S07("Could not instanciate service class [{0}], {1}"),
S08("Could not load service classes, {0}"),
S09("Could not set service [{0}] programmatically -server shutting down-, {1}"),
S10("Service [{0}] requires service [{1}]"),
S11("Service [{0}] exception during status change to [{1}] -server shutting down-, {2}"),
S12("Could not start service [{0}], {1}"),
S13("Missing system property [{0}]"),
S14("Could not initialize server, {0}")
;
private String msg;
/**
* Constructor for the error code enum.
*
* @param msg message template.
*/
private ERROR(String msg) {
this.msg = msg;
}
/**
* Returns the message template for the error code.
*
* @return the message template for the error code.
*/
@Override
public String getTemplate() {
return msg;
}
}
/**
* Constructor for sub-classes.
*
* @param error error code for the XException.
* @param params parameters to use when creating the error message
* with the error code template.
*/
protected ServerException(XException.ERROR error, Object... params) {
super(error, params);
}
/**
* Creates a server exception using the specified error code.
* The exception message is resolved using the error code template
* and the passed parameters.
*
* @param error error code for the XException.
* @param params parameters to use when creating the error message
* with the error code template.
*/
public ServerException(ERROR error, Object... params) {
super(error, params);
}
}
| 3,123 | 30.877551 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServiceException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.lib.lang.XException;
/**
* Exception thrown by {@link Service} implementations.
*/
@InterfaceAudience.Private
public class ServiceException extends ServerException {
/**
* Creates a service exception using the specified error code.
* The exception message is resolved using the error code template
* and the passed parameters.
*
* @param error error code for the XException.
* @param params parameters to use when creating the error message
* with the error code template.
*/
public ServiceException(XException.ERROR error, Object... params) {
super(error, params);
}
}
| 1,541 | 34.045455 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.util.Check;
import org.apache.hadoop.lib.util.ConfigurationUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.LogManager;
import org.apache.log4j.PropertyConfigurator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
/**
* A Server class provides standard configuration, logging and {@link Service}
* lifecycle management.
* <p>
* A Server normally has a home directory, a configuration directory, a temp
* directory and logs directory.
* <p>
* The Server configuration is loaded from 2 overlapped files,
* <code>#SERVER#-default.xml</code> and <code>#SERVER#-site.xml</code>. The
* default file is loaded from the classpath, the site file is loaded from the
* configuration directory.
* <p>
* The Server collects all configuration properties prefixed with
* <code>#SERVER#</code>. The property names are then trimmed from the
* <code>#SERVER#</code> prefix.
* <p>
* The Server log configuration is loaded from the
* <code>#SERVER#-log4j.properties</code> file in the configuration directory.
* <p>
* The lifecycle of a server is defined by the {@link Server.Status} enum.
* When a server is created its status is UNDEF; while being initialized it is
* BOOTING; once initialization completes it transitions, by default, to NORMAL.
* The <code>#SERVER#.startup.status</code> configuration property can be used
* to specify a different startup status (NORMAL, ADMIN or HALTED).
* <p>
* Services classes are defined in the <code>#SERVER#.services</code> and
* <code>#SERVER#.services.ext</code> properties. They are loaded in order
* (services first, then services.ext).
* <p>
* Before initializing the services, they are traversed and duplicate service
* interfaces are removed from the service list. The last service using a given
* interface wins (this enables a simple override mechanism).
* <p>
* After the services have been resolved by interface de-duplication they are
* initialized in order. Once all services are initialized they are
* post-initialized (this enables late/conditional service bindings).
*/
@InterfaceAudience.Private
public class Server {
private Logger log;
/**
* Server property name that defines the service classes.
*/
public static final String CONF_SERVICES = "services";
/**
* Server property name that defines the service extension classes.
*/
public static final String CONF_SERVICES_EXT = "services.ext";
/**
* Server property name that defines server startup status.
*/
public static final String CONF_STARTUP_STATUS = "startup.status";
/**
* Enumeration that defines the server status.
*/
@InterfaceAudience.Private
public static enum Status {
UNDEF(false, false),
BOOTING(false, true),
HALTED(true, true),
ADMIN(true, true),
NORMAL(true, true),
SHUTTING_DOWN(false, true),
SHUTDOWN(false, false);
private boolean settable;
private boolean operational;
/**
* Status constructor.
*
* @param settable indicates if the status is settable.
* @param operational indicates if the server is operational
* when in this status.
*/
private Status(boolean settable, boolean operational) {
this.settable = settable;
this.operational = operational;
}
/**
* Returns if this server status is operational.
*
* @return if this server status is operational.
*/
public boolean isOperational() {
return operational;
}
}
/**
* Name of the log4j configuration file the Server will load from the
* classpath if the <code>#SERVER#-log4j.properties</code> is not defined
* in the server configuration directory.
*/
public static final String DEFAULT_LOG4J_PROPERTIES = "default-log4j.properties";
private Status status;
private String name;
private String homeDir;
private String configDir;
private String logDir;
private String tempDir;
private Configuration config;
private Map<Class, Service> services = new LinkedHashMap<Class, Service>();
/**
* Creates a server instance.
* <p>
* The config, log and temp directories are all under the specified home directory.
*
* @param name server name.
* @param homeDir server home directory.
*/
public Server(String name, String homeDir) {
this(name, homeDir, null);
}
/**
* Creates a server instance.
*
* @param name server name.
* @param homeDir server home directory.
* @param configDir config directory.
* @param logDir log directory.
* @param tempDir temp directory.
*/
public Server(String name, String homeDir, String configDir, String logDir, String tempDir) {
this(name, homeDir, configDir, logDir, tempDir, null);
}
/**
* Creates a server instance.
* <p>
* The config, log and temp directories are all under the specified home directory.
* <p>
* It uses the provided configuration instead loading it from the config dir.
*
* @param name server name.
* @param homeDir server home directory.
* @param config server configuration.
*/
public Server(String name, String homeDir, Configuration config) {
this(name, homeDir, homeDir + "/conf", homeDir + "/log", homeDir + "/temp", config);
}
/**
* Creates a server instance.
* <p>
* It uses the provided configuration instead loading it from the config dir.
*
* @param name server name.
* @param homeDir server home directory.
* @param configDir config directory.
* @param logDir log directory.
* @param tempDir temp directory.
* @param config server configuration.
*/
public Server(String name, String homeDir, String configDir, String logDir, String tempDir, Configuration config) {
this.name = StringUtils.toLowerCase(Check.notEmpty(name, "name").trim());
this.homeDir = Check.notEmpty(homeDir, "homeDir");
this.configDir = Check.notEmpty(configDir, "configDir");
this.logDir = Check.notEmpty(logDir, "logDir");
this.tempDir = Check.notEmpty(tempDir, "tempDir");
checkAbsolutePath(homeDir, "homeDir");
checkAbsolutePath(configDir, "configDir");
checkAbsolutePath(logDir, "logDir");
checkAbsolutePath(tempDir, "tempDir");
if (config != null) {
this.config = new Configuration(false);
ConfigurationUtils.copy(config, this.config);
}
status = Status.UNDEF;
}
/**
* Validates that the specified value is an absolute path (starts with '/').
*
* @param value value to verify it is an absolute path.
* @param name name to use in the exception if the value is not an absolute
* path.
*
* @return the value.
*
* @throws IllegalArgumentException thrown if the value is not an absolute
* path.
*/
private String checkAbsolutePath(String value, String name) {
if (!new File(value).isAbsolute()) {
throw new IllegalArgumentException(
MessageFormat.format("[{0}] must be an absolute path [{1}]", name, value));
}
return value;
}
/**
* Returns the current server status.
*
* @return the current server status.
*/
public Status getStatus() {
return status;
}
/**
* Sets a new server status.
* <p>
* The status must be settable.
* <p>
* All services will be notified of the status change via the
* {@link Service#serverStatusChange(Server.Status, Server.Status)} method. If a service
* throws an exception during the notification, the server will be destroyed.
*
* @param status status to set.
*
* @throws ServerException thrown if the server has been destroyed because of
* a failed notification to a service.
*/
public void setStatus(Status status) throws ServerException {
Check.notNull(status, "status");
if (status.settable) {
if (status != this.status) {
Status oldStatus = this.status;
this.status = status;
for (Service service : services.values()) {
try {
service.serverStatusChange(oldStatus, status);
} catch (Exception ex) {
log.error("Service [{}] exception during status change to [{}] -server shutting down-, {}",
new Object[]{service.getInterface().getSimpleName(), status, ex.getMessage(), ex});
destroy();
throw new ServerException(ServerException.ERROR.S11, service.getInterface().getSimpleName(),
status, ex.getMessage(), ex);
}
}
}
} else {
throw new IllegalArgumentException("Status [" + status + " is not settable");
}
}
/**
* Verifies the server is operational.
*
* @throws IllegalStateException thrown if the server is not operational.
*/
protected void ensureOperational() {
if (!getStatus().isOperational()) {
throw new IllegalStateException("Server is not running");
}
}
/**
* Convenience method that returns a resource as inputstream from the
* classpath.
* <p>
* It first attempts to use the Thread's context classloader and if not
* set it uses the <code>Server</code> class classloader.
*
* @param name resource to retrieve.
*
* @return inputstream with the resource, NULL if the resource does not
* exist.
*/
static InputStream getResource(String name) {
Check.notEmpty(name, "name");
ClassLoader cl = Thread.currentThread().getContextClassLoader();
if (cl == null) {
cl = Server.class.getClassLoader();
}
return cl.getResourceAsStream(name);
}
/**
* Initializes the Server.
* <p>
* The initialization steps are:
* <ul>
* <li>It verifies the server home and temp directories exist</li>
* <li>Loads the Server <code>#SERVER#-default.xml</code>
* configuration file from the classpath</li>
* <li>Initializes log4j logging. If the
* <code>#SERVER#-log4j.properties</code> file does not exist in the config
* directory it loads <code>default-log4j.properties</code> from the classpath
* </li>
* <li>Loads the <code>#SERVER#-site.xml</code> file from the server config
* directory and merges it with the default configuration.</li>
* <li>Loads the services</li>
* <li>Initializes the services</li>
* <li>Post-initializes the services</li>
* <li>Sets the server startup status</li>
* </ul>
*
* @throws ServerException thrown if the server could not be initialized.
*/
public void init() throws ServerException {
if (status != Status.UNDEF) {
throw new IllegalStateException("Server already initialized");
}
status = Status.BOOTING;
verifyDir(homeDir);
verifyDir(tempDir);
Properties serverInfo = new Properties();
try {
InputStream is = getResource(name + ".properties");
serverInfo.load(is);
is.close();
} catch (IOException ex) {
throw new RuntimeException("Could not load server information file: " + name + ".properties");
}
initLog();
log.info("++++++++++++++++++++++++++++++++++++++++++++++++++++++");
log.info("Server [{}] starting", name);
log.info(" Built information:");
log.info(" Version : {}", serverInfo.getProperty(name + ".version", "undef"));
log.info(" Source Repository : {}", serverInfo.getProperty(name + ".source.repository", "undef"));
log.info(" Source Revision : {}", serverInfo.getProperty(name + ".source.revision", "undef"));
log.info(" Built by : {}", serverInfo.getProperty(name + ".build.username", "undef"));
log.info(" Built timestamp : {}", serverInfo.getProperty(name + ".build.timestamp", "undef"));
log.info(" Runtime information:");
log.info(" Home dir: {}", homeDir);
log.info(" Config dir: {}", (config == null) ? configDir : "-");
log.info(" Log dir: {}", logDir);
log.info(" Temp dir: {}", tempDir);
initConfig();
log.debug("Loading services");
List<Service> list = loadServices();
try {
log.debug("Initializing services");
initServices(list);
log.info("Services initialized");
} catch (ServerException ex) {
log.error("Services initialization failure, destroying initialized services");
destroyServices();
throw ex;
}
Status status = Status.valueOf(getConfig().get(getPrefixedName(CONF_STARTUP_STATUS), Status.NORMAL.toString()));
setStatus(status);
log.info("Server [{}] started!, status [{}]", name, status);
}
/**
* Verifies the specified directory exists.
*
* @param dir directory to verify it exists.
*
* @throws ServerException thrown if the directory does not exist or the
* path is not a directory.
*/
private void verifyDir(String dir) throws ServerException {
File file = new File(dir);
if (!file.exists()) {
throw new ServerException(ServerException.ERROR.S01, dir);
}
if (!file.isDirectory()) {
throw new ServerException(ServerException.ERROR.S02, dir);
}
}
/**
* Initializes Log4j logging.
*
* @throws ServerException thrown if Log4j could not be initialized.
*/
protected void initLog() throws ServerException {
verifyDir(logDir);
LogManager.resetConfiguration();
File log4jFile = new File(configDir, name + "-log4j.properties");
if (log4jFile.exists()) {
PropertyConfigurator.configureAndWatch(log4jFile.toString(), 10 * 1000); //every 10 secs
log = LoggerFactory.getLogger(Server.class);
} else {
Properties props = new Properties();
try {
InputStream is = getResource(DEFAULT_LOG4J_PROPERTIES);
try {
props.load(is);
} finally {
is.close();
}
} catch (IOException ex) {
throw new ServerException(ServerException.ERROR.S03, DEFAULT_LOG4J_PROPERTIES, ex.getMessage(), ex);
}
PropertyConfigurator.configure(props);
log = LoggerFactory.getLogger(Server.class);
log.warn("Log4j [{}] configuration file not found, using default configuration from classpath", log4jFile);
}
}
/**
* Loads and initializes the server configuration.
*
* @throws ServerException thrown if the configuration could not be loaded/initialized.
*/
protected void initConfig() throws ServerException {
verifyDir(configDir);
File file = new File(configDir);
Configuration defaultConf;
String defaultConfig = name + "-default.xml";
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
InputStream inputStream = classLoader.getResourceAsStream(defaultConfig);
if (inputStream == null) {
log.warn("Default configuration file not available in classpath [{}]", defaultConfig);
defaultConf = new Configuration(false);
} else {
try {
defaultConf = new Configuration(false);
ConfigurationUtils.load(defaultConf, inputStream);
} catch (Exception ex) {
throw new ServerException(ServerException.ERROR.S03, defaultConfig, ex.getMessage(), ex);
}
}
if (config == null) {
Configuration siteConf;
File siteFile = new File(file, name + "-site.xml");
if (!siteFile.exists()) {
log.warn("Site configuration file [{}] not found in config directory", siteFile);
siteConf = new Configuration(false);
} else {
if (!siteFile.isFile()) {
throw new ServerException(ServerException.ERROR.S05, siteFile.getAbsolutePath());
}
try {
log.debug("Loading site configuration from [{}]", siteFile);
inputStream = new FileInputStream(siteFile);
siteConf = new Configuration(false);
ConfigurationUtils.load(siteConf, inputStream);
} catch (IOException ex) {
throw new ServerException(ServerException.ERROR.S06, siteFile, ex.getMessage(), ex);
}
}
config = new Configuration(false);
ConfigurationUtils.copy(siteConf, config);
}
ConfigurationUtils.injectDefaults(defaultConf, config);
for (String name : System.getProperties().stringPropertyNames()) {
String value = System.getProperty(name);
if (name.startsWith(getPrefix() + ".")) {
config.set(name, value);
if (name.endsWith(".password") || name.endsWith(".secret")) {
value = "*MASKED*";
}
log.info("System property sets {}: {}", name, value);
}
}
log.debug("Loaded Configuration:");
log.debug("------------------------------------------------------");
for (Map.Entry<String, String> entry : config) {
String name = entry.getKey();
String value = config.get(entry.getKey());
if (name.endsWith(".password") || name.endsWith(".secret")) {
value = "*MASKED*";
}
log.debug(" {}: {}", entry.getKey(), value);
}
log.debug("------------------------------------------------------");
}
/**
* Loads the specified services.
*
* @param classes services classes to load.
* @param list list of loaded services in order of appearance in the
* configuration.
*
* @throws ServerException thrown if a service class could not be loaded.
*/
private void loadServices(Class[] classes, List<Service> list) throws ServerException {
for (Class klass : classes) {
try {
Service service = (Service) klass.newInstance();
log.debug("Loading service [{}] implementation [{}]", service.getInterface(),
service.getClass());
if (!service.getInterface().isInstance(service)) {
throw new ServerException(ServerException.ERROR.S04, klass, service.getInterface().getName());
}
list.add(service);
} catch (ServerException ex) {
throw ex;
} catch (Exception ex) {
throw new ServerException(ServerException.ERROR.S07, klass, ex.getMessage(), ex);
}
}
}
/**
* Loads services defined in <code>services</code> and
* <code>services.ext</code> and de-dups them.
*
* @return List of final services to initialize.
*
* @throws ServerException thrown if the services could not be loaded.
*/
protected List<Service> loadServices() throws ServerException {
try {
Map<Class, Service> map = new LinkedHashMap<Class, Service>();
Class[] classes = getConfig().getClasses(getPrefixedName(CONF_SERVICES));
Class[] classesExt = getConfig().getClasses(getPrefixedName(CONF_SERVICES_EXT));
List<Service> list = new ArrayList<Service>();
loadServices(classes, list);
loadServices(classesExt, list);
//removing duplicate services, strategy: last one wins
for (Service service : list) {
if (map.containsKey(service.getInterface())) {
log.debug("Replacing service [{}] implementation [{}]", service.getInterface(),
service.getClass());
}
map.put(service.getInterface(), service);
}
list = new ArrayList<Service>();
for (Map.Entry<Class, Service> entry : map.entrySet()) {
list.add(entry.getValue());
}
return list;
} catch (RuntimeException ex) {
throw new ServerException(ServerException.ERROR.S08, ex.getMessage(), ex);
}
}
/**
* Initializes the list of services.
*
* @param services services to initialize; it must be a de-duplicated list of
* services.
*
* @throws ServerException thrown if the services could not be initialized.
*/
protected void initServices(List<Service> services) throws ServerException {
for (Service service : services) {
log.debug("Initializing service [{}]", service.getInterface());
checkServiceDependencies(service);
service.init(this);
this.services.put(service.getInterface(), service);
}
for (Service service : services) {
service.postInit();
}
}
/**
* Checks if all service dependencies of a service are available.
*
* @param service service to check if all its dependencies are available.
*
* @throws ServerException thrown if a service dependency is missing.
*/
protected void checkServiceDependencies(Service service) throws ServerException {
if (service.getServiceDependencies() != null) {
for (Class dependency : service.getServiceDependencies()) {
if (services.get(dependency) == null) {
throw new ServerException(ServerException.ERROR.S10, service.getClass(), dependency);
}
}
}
}
/**
* Destroys the server services.
*/
protected void destroyServices() {
List<Service> list = new ArrayList<Service>(services.values());
Collections.reverse(list);
for (Service service : list) {
try {
log.debug("Destroying service [{}]", service.getInterface());
service.destroy();
} catch (Throwable ex) {
log.error("Could not destroy service [{}], {}",
new Object[]{service.getInterface(), ex.getMessage(), ex});
}
}
log.info("Services destroyed");
}
/**
* Destroys the server.
* <p>
* All services are destroyed in reverse order of initialization, then the
* Log4j framework is shutdown.
*/
public void destroy() {
ensureOperational();
destroyServices();
log.info("Server [{}] shutdown!", name);
log.info("======================================================");
if (!Boolean.getBoolean("test.circus")) {
LogManager.shutdown();
}
status = Status.SHUTDOWN;
}
/**
* Returns the name of the server.
*
* @return the server name.
*/
public String getName() {
return name;
}
/**
* Returns the server prefix for server configuration properties.
* <p>
* By default it is the server name.
*
* @return the prefix for server configuration properties.
*/
public String getPrefix() {
return getName();
}
/**
* Returns the prefixed name of a server property.
*
* @param name of the property.
*
* @return prefixed name of the property.
*/
public String getPrefixedName(String name) {
return getPrefix() + "." + Check.notEmpty(name, "name");
}
/**
* Returns the server home dir.
*
* @return the server home dir.
*/
public String getHomeDir() {
return homeDir;
}
/**
* Returns the server config dir.
*
* @return the server config dir.
*/
public String getConfigDir() {
return configDir;
}
/**
* Returns the server log dir.
*
* @return the server log dir.
*/
public String getLogDir() {
return logDir;
}
/**
* Returns the server temp dir.
*
* @return the server temp dir.
*/
public String getTempDir() {
return tempDir;
}
/**
* Returns the server configuration.
*
* @return the server configuration.
*/
public Configuration getConfig() {
return config;
}
/**
* Returns the {@link Service} associated to the specified interface.
*
* @param serviceKlass service interface.
*
* @return the service implementation.
*/
@SuppressWarnings("unchecked")
public <T> T get(Class<T> serviceKlass) {
ensureOperational();
Check.notNull(serviceKlass, "serviceKlass");
return (T) services.get(serviceKlass);
}
/**
* Adds a service programmatically.
* <p>
* If a service with the same interface exists, it will be destroyed and
* removed before the given one is initialized and added.
* <p>
* If an exception is thrown the server is destroyed.
*
* @param klass service class to add.
*
* @throws ServerException thrown if the service could not be initialized/added
* to the server.
*/
public void setService(Class<? extends Service> klass) throws ServerException {
ensureOperational();
Check.notNull(klass, "serviceKlass");
if (getStatus() == Status.SHUTTING_DOWN) {
throw new IllegalStateException("Server shutting down");
}
try {
Service newService = klass.newInstance();
Service oldService = services.get(newService.getInterface());
if (oldService != null) {
try {
oldService.destroy();
} catch (Throwable ex) {
log.error("Could not destroy service [{}], {}",
new Object[]{oldService.getInterface(), ex.getMessage(), ex});
}
}
newService.init(this);
services.put(newService.getInterface(), newService);
} catch (Exception ex) {
log.error("Could not set service [{}] programmatically -server shutting down-, {}", klass, ex);
destroy();
throw new ServerException(ServerException.ERROR.S09, klass, ex.getMessage(), ex);
}
}
}
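
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original Server.java: boots a server
// programmatically with an in-memory configuration. The "myserver" name, the
// home directory, and the Greeter/GreeterService types (from the sketch added
// after BaseService above) are assumptions; it also assumes a
// myserver.properties resource is available on the classpath and that the
// home/temp/log directories exist, as required by init().
// ---------------------------------------------------------------------------
class ServerUsageExample {
  static void boot() throws ServerException {
    Configuration conf = new Configuration(false);
    // Service classes are listed in #SERVER#.services; for duplicate
    // interfaces the last one listed wins.
    conf.set("myserver.services", GreeterService.class.getName());
    Server server = new Server("myserver", "/var/lib/myserver", conf);
    server.init();                    // UNDEF -> BOOTING -> NORMAL by default
    Greeter greeter = server.get(Greeter.class);
    System.out.println(greeter.greet("world"));
    server.destroy();                 // services destroyed in reverse order
  }
}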
| 25,857 | 32.365161 | 117 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.apache.hadoop.classification.InterfaceAudience;
import java.text.MessageFormat;
@InterfaceAudience.Private
public abstract class BooleanParam extends Param<Boolean> {
public BooleanParam(String name, Boolean defaultValue) {
super(name, defaultValue);
}
@Override
protected Boolean parse(String str) throws Exception {
if (str.equalsIgnoreCase("true")) {
return true;
} else if (str.equalsIgnoreCase("false")) {
return false;
}
throw new IllegalArgumentException(MessageFormat.format("Invalid value [{0}], must be a boolean", str));
}
@Override
protected String getDomain() {
return "a boolean";
}
}
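
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original BooleanParam.java: parameters
// are typically declared as small concrete subclasses that fix the name and
// the default value. The OverwriteParam name and its default are assumptions
// made up for this sketch.
// ---------------------------------------------------------------------------
class OverwriteParam extends BooleanParam {
  public static final String NAME = "overwrite";

  public OverwriteParam() {
    super(NAME, true);
  }
}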
| 1,512 | 31.191489 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.Private
public abstract class ByteParam extends Param<Byte> {
public ByteParam(String name, Byte defaultValue) {
super(name, defaultValue);
}
@Override
protected Byte parse(String str) throws Exception {
return Byte.parseByte(str);
}
@Override
protected String getDomain() {
return "a byte";
}
}
| 1,251 | 30.3 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ExceptionProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.HttpExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.ExceptionMapper;
@InterfaceAudience.Private
public class ExceptionProvider implements ExceptionMapper<Throwable> {
private static Logger LOG = LoggerFactory.getLogger(ExceptionProvider.class);
private static final String ENTER = System.getProperty("line.separator");
protected Response createResponse(Response.Status status, Throwable throwable) {
return HttpExceptionUtils.createJerseyExceptionResponse(status, throwable);
}
protected String getOneLineMessage(Throwable throwable) {
String message = throwable.getMessage();
if (message != null) {
int i = message.indexOf(ENTER);
if (i > -1) {
message = message.substring(0, i);
}
}
return message;
}
protected void log(Response.Status status, Throwable throwable) {
LOG.debug("{}", throwable.getMessage(), throwable);
}
@Override
public Response toResponse(Throwable throwable) {
return createResponse(Response.Status.BAD_REQUEST, throwable);
}
}
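
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original ExceptionProvider.java: a
// subclass would typically map concrete exception types to HTTP statuses and
// fall back to the BAD_REQUEST default above. The ExampleExceptionProvider
// class and the particular mappings are assumptions made up for this sketch;
// registration with JAX-RS (e.g. via @Provider) is omitted.
// ---------------------------------------------------------------------------
class ExampleExceptionProvider extends ExceptionProvider {
  @Override
  public Response toResponse(Throwable throwable) {
    Response.Status status;
    if (throwable instanceof java.io.FileNotFoundException) {
      status = Response.Status.NOT_FOUND;
    } else if (throwable instanceof SecurityException) {
      status = Response.Status.UNAUTHORIZED;
    } else {
      status = Response.Status.BAD_REQUEST;
    }
    log(status, throwable);
    return createResponse(status, throwable);
  }
}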
| 2,050 | 33.183333 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.io.IOUtils;
import javax.ws.rs.core.StreamingOutput;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
@InterfaceAudience.Private
public class InputStreamEntity implements StreamingOutput {
private InputStream is;
private long offset;
private long len;
public InputStreamEntity(InputStream is, long offset, long len) {
this.is = is;
this.offset = offset;
this.len = len;
}
public InputStreamEntity(InputStream is) {
this(is, 0, -1);
}
@Override
public void write(OutputStream os) throws IOException {
IOUtils.skipFully(is, offset);
if (len == -1) {
IOUtils.copyBytes(is, os, 4096, true);
} else {
IOUtils.copyBytes(is, os, len, true);
}
}
}
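
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original InputStreamEntity.java: a
// JAX-RS resource method would wrap an open stream in an InputStreamEntity so
// the container streams it out lazily; offset/len select a byte range and a
// len of -1 means "to the end of the stream". The method shape below is an
// assumption made up for this sketch.
// ---------------------------------------------------------------------------
class InputStreamEntityUsageExample {
  static javax.ws.rs.core.Response streamRange(InputStream is, long offset, long len) {
    InputStreamEntity entity = new InputStreamEntity(is, offset, len);
    return javax.ws.rs.core.Response.ok(entity)
      .type(javax.ws.rs.core.MediaType.APPLICATION_OCTET_STREAM).build();
  }
}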
| 1,674 | 29.454545 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/JSONProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.json.simple.JSONStreamAware;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.ext.MessageBodyWriter;
import javax.ws.rs.ext.Provider;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
@Provider
@Produces(MediaType.APPLICATION_JSON)
@InterfaceAudience.Private
public class JSONProvider implements MessageBodyWriter<JSONStreamAware> {
private static final String ENTER = System.getProperty("line.separator");
@Override
public boolean isWriteable(Class<?> aClass, Type type, Annotation[] annotations, MediaType mediaType) {
return JSONStreamAware.class.isAssignableFrom(aClass);
}
@Override
public long getSize(JSONStreamAware jsonStreamAware, Class<?> aClass, Type type, Annotation[] annotations,
MediaType mediaType) {
return -1;
}
@Override
public void writeTo(JSONStreamAware jsonStreamAware, Class<?> aClass, Type type, Annotation[] annotations,
MediaType mediaType, MultivaluedMap<String, Object> stringObjectMultivaluedMap,
OutputStream outputStream) throws IOException, WebApplicationException {
Writer writer = new OutputStreamWriter(outputStream, Charsets.UTF_8);
jsonStreamAware.writeJSONString(writer);
writer.write(ENTER);
writer.flush();
}
}
| 2,477 | 36.545455 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.apache.hadoop.classification.InterfaceAudience;
import com.google.common.collect.Lists;
import java.util.List;
import java.util.Map;
/**
* Class that contains all parsed JAX-RS parameters.
* <p>
* Instances are created by the {@link ParametersProvider} class.
*/
@InterfaceAudience.Private
public class Parameters {
private Map<String, List<Param<?>>> params;
/**
* Constructor that receives the request parsed parameters.
*
* @param params the request parsed parameters.
*/
public Parameters(Map<String, List<Param<?>>> params) {
this.params = params;
}
/**
* Returns the value of a request parsed parameter.
*
* @param name parameter name.
* @param klass class of the parameter, used for value casting.
* @return the value of the parameter.
*/
@SuppressWarnings("unchecked")
public <V, T extends Param<V>> V get(String name, Class<T> klass) {
List<Param<?>> multiParams = (List<Param<?>>)params.get(name);
if (multiParams != null && multiParams.size() > 0) {
      return ((T) multiParams.get(0)).value(); // Return the first value.
}
return null;
}
/**
* Returns the values of a request parsed parameter.
*
* @param name parameter name.
* @param klass class of the parameter, used for value casting.
* @return the values of the parameter.
*/
@SuppressWarnings("unchecked")
public <V, T extends Param<V>> List<V> getValues(String name, Class<T> klass) {
List<Param<?>> multiParams = (List<Param<?>>)params.get(name);
List<V> values = Lists.newArrayList();
if (multiParams != null) {
for (Param<?> param : multiParams) {
V value = ((T) param).value();
if (value != null) {
values.add(value);
}
}
}
return values;
}
}
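// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): a hypothetical Param
// subclass plus a hand-built Parameters instance showing how get() and
// getValues() are typically read. All names here are invented.
// ---------------------------------------------------------------------------
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.google.common.collect.Lists;

class ParametersUsageSketch {
  static class LenParam extends LongParam {
    static final String NAME = "len";
    public LenParam() {
      super(NAME, -1L);
    }
  }

  static void demo() {
    Map<String, List<Param<?>>> raw = new HashMap<String, List<Param<?>>>();
    LenParam len = new LenParam();
    len.parseParam("1024");
    raw.put(LenParam.NAME, Lists.<Param<?>>newArrayList(len));
    Parameters params = new Parameters(raw);
    Long single = params.get(LenParam.NAME, LenParam.class);          // 1024
    List<Long> all = params.getValues(LenParam.NAME, LenParam.class); // [1024]
  }
}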
| 2,639 | 30.807229 | 81 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.Private
public abstract class LongParam extends Param<Long> {
public LongParam(String name, Long defaultValue) {
super(name, defaultValue);
}
@Override
protected Long parse(String str) throws Exception {
return Long.parseLong(str);
}
@Override
protected String getDomain() {
return "a long";
}
}
| 1,251 | 30.3 | 75 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import com.google.common.collect.Lists;
import com.sun.jersey.api.core.HttpContext;
import com.sun.jersey.core.spi.component.ComponentContext;
import com.sun.jersey.core.spi.component.ComponentScope;
import com.sun.jersey.server.impl.inject.AbstractHttpContextInjectable;
import com.sun.jersey.spi.inject.Injectable;
import com.sun.jersey.spi.inject.InjectableProvider;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.StringUtils;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MultivaluedMap;
import java.lang.reflect.Type;
import java.text.MessageFormat;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Jersey provider that parses the request parameters based on the
* given parameter definition.
*/
@InterfaceAudience.Private
public class ParametersProvider
extends AbstractHttpContextInjectable<Parameters>
implements InjectableProvider<Context, Type> {
private String driverParam;
private Class<? extends Enum> enumClass;
private Map<Enum, Class<Param<?>>[]> paramsDef;
public ParametersProvider(String driverParam, Class<? extends Enum> enumClass,
Map<Enum, Class<Param<?>>[]> paramsDef) {
this.driverParam = driverParam;
this.enumClass = enumClass;
this.paramsDef = paramsDef;
}
@Override
@SuppressWarnings("unchecked")
public Parameters getValue(HttpContext httpContext) {
Map<String, List<Param<?>>> map = new HashMap<String, List<Param<?>>>();
Map<String, List<String>> queryString =
httpContext.getRequest().getQueryParameters();
String str = ((MultivaluedMap<String, String>) queryString).
getFirst(driverParam);
if (str == null) {
throw new IllegalArgumentException(
MessageFormat.format("Missing Operation parameter [{0}]",
driverParam));
}
Enum op;
try {
op = Enum.valueOf(enumClass, StringUtils.toUpperCase(str));
} catch (IllegalArgumentException ex) {
throw new IllegalArgumentException(
MessageFormat.format("Invalid Operation [{0}]", str));
}
if (!paramsDef.containsKey(op)) {
throw new IllegalArgumentException(
MessageFormat.format("Unsupported Operation [{0}]", op));
}
for (Class<Param<?>> paramClass : paramsDef.get(op)) {
Param<?> param = newParam(paramClass);
List<Param<?>> paramList = Lists.newArrayList();
List<String> ps = queryString.get(param.getName());
if (ps != null) {
for (String p : ps) {
try {
param.parseParam(p);
}
catch (Exception ex) {
throw new IllegalArgumentException(ex.toString(), ex);
}
paramList.add(param);
param = newParam(paramClass);
}
} else {
paramList.add(param);
}
map.put(param.getName(), paramList);
}
return new Parameters(map);
}
private Param<?> newParam(Class<Param<?>> paramClass) {
try {
return paramClass.newInstance();
} catch (Exception ex) {
throw new UnsupportedOperationException(
MessageFormat.format(
"Param class [{0}] does not have default constructor",
paramClass.getName()));
}
}
@Override
public ComponentScope getScope() {
return ComponentScope.PerRequest;
}
@Override
public Injectable getInjectable(ComponentContext componentContext, Context context, Type type) {
return (type.equals(Parameters.class)) ? this : null;
}
}
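// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): wiring a hypothetical
// operation enum to its expected parameter classes. The enum, the "op" query
// parameter name and LenParam are invented; HttpFS defines its own equivalents
// elsewhere in the codebase.
// ---------------------------------------------------------------------------
import java.util.HashMap;
import java.util.Map;

class ParametersProviderUsageSketch {
  enum Op { OPEN, DELETE }

  static class LenParam extends LongParam {
    static final String NAME = "len";
    public LenParam() {
      super(NAME, -1L);
    }
  }

  @SuppressWarnings("unchecked")
  static ParametersProvider newProvider() {
    Map<Enum, Class<Param<?>>[]> defs = new HashMap<Enum, Class<Param<?>>[]>();
    defs.put(Op.OPEN, new Class[] { LenParam.class }); // OPEN accepts an optional len
    defs.put(Op.DELETE, new Class[] {});               // DELETE takes no extra params
    return new ParametersProvider("op", Op.class, defs);
  }
}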
| 4,378 | 33.210938 | 98 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.StringUtils;
import java.util.Arrays;
@InterfaceAudience.Private
public abstract class EnumParam<E extends Enum<E>> extends Param<E> {
Class<E> klass;
public EnumParam(String name, Class<E> e, E defaultValue) {
super(name, defaultValue);
klass = e;
}
@Override
protected E parse(String str) throws Exception {
return Enum.valueOf(klass, StringUtils.toUpperCase(str));
}
@Override
protected String getDomain() {
return StringUtils.join(",", Arrays.asList(klass.getEnumConstants()));
}
}
| 1,460 | 30.76087 | 75 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumSetParam.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.Iterator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.StringUtils;
@InterfaceAudience.Private
public abstract class EnumSetParam<E extends Enum<E>> extends Param<EnumSet<E>> {
Class<E> klass;
public EnumSetParam(String name, Class<E> e, EnumSet<E> defaultValue) {
super(name, defaultValue);
klass = e;
}
@Override
protected EnumSet<E> parse(String str) throws Exception {
final EnumSet<E> set = EnumSet.noneOf(klass);
if (!str.isEmpty()) {
for (String sub : str.split(",")) {
set.add(Enum.valueOf(klass, StringUtils.toUpperCase(sub.trim())));
}
}
return set;
}
@Override
protected String getDomain() {
return Arrays.asList(klass.getEnumConstants()).toString();
}
/** Convert an EnumSet to a string of comma separated values. */
public static <E extends Enum<E>> String toString(EnumSet<E> set) {
if (set == null || set.isEmpty()) {
return "";
} else {
final StringBuilder b = new StringBuilder();
final Iterator<E> i = set.iterator();
b.append(i.next());
while (i.hasNext()) {
b.append(',').append(i.next());
}
return b.toString();
}
}
@Override
public String toString() {
return getName() + "=" + toString(value);
}
}
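// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): round-tripping a value
// through the comma-separated form handled by parse() and toString(EnumSet).
// The Flag enum and the "flag" parameter name are invented.
// ---------------------------------------------------------------------------
import java.util.EnumSet;

class EnumSetParamSketch {
  enum Flag { CREATE, REPLACE }

  static class FlagsParam extends EnumSetParam<Flag> {
    public FlagsParam() {
      super("flag", Flag.class, EnumSet.noneOf(Flag.class));
    }
  }

  static void demo() {
    FlagsParam param = new FlagsParam();
    param.parseParam("create,replace");                 // parsing is case-insensitive
    String csv = EnumSetParam.toString(param.value());  // "CREATE,REPLACE"
  }
}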
| 2,227 | 29.944444 | 81 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/JSONMapProvider.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.json.simple.JSONObject;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.ext.MessageBodyWriter;
import javax.ws.rs.ext.Provider;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
import java.util.Map;
@Provider
@Produces(MediaType.APPLICATION_JSON)
@InterfaceAudience.Private
public class JSONMapProvider implements MessageBodyWriter<Map> {
private static final String ENTER = System.getProperty("line.separator");
@Override
public boolean isWriteable(Class<?> aClass, Type type, Annotation[] annotations, MediaType mediaType) {
return Map.class.isAssignableFrom(aClass);
}
@Override
public long getSize(Map map, Class<?> aClass, Type type, Annotation[] annotations, MediaType mediaType) {
return -1;
}
@Override
public void writeTo(Map map, Class<?> aClass, Type type, Annotation[] annotations,
MediaType mediaType, MultivaluedMap<String, Object> stringObjectMultivaluedMap,
OutputStream outputStream) throws IOException, WebApplicationException {
Writer writer = new OutputStreamWriter(outputStream, Charsets.UTF_8);
JSONObject.writeJSONString(map, writer);
writer.write(ENTER);
writer.flush();
}
}
| 2,404 | 35.439394 | 107 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.Private
public abstract class IntegerParam extends Param<Integer> {
public IntegerParam(String name, Integer defaultValue) {
super(name, defaultValue);
}
@Override
protected Integer parse(String str) throws Exception {
return Integer.parseInt(str);
}
@Override
protected String getDomain() {
return "an integer";
}
}
| 1,272 | 30.825 | 75 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.Private
public abstract class ShortParam extends Param<Short> {
private int radix;
public ShortParam(String name, Short defaultValue, int radix) {
super(name, defaultValue);
this.radix = radix;
}
public ShortParam(String name, Short defaultValue) {
this(name, defaultValue, 10);
}
@Override
protected Short parse(String str) throws Exception {
return Short.parseShort(str, radix);
}
@Override
protected String getDomain() {
return "a short";
}
}
| 1,417 | 29.170213 | 75 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.apache.hadoop.classification.InterfaceAudience;
import java.text.MessageFormat;
import java.util.regex.Pattern;
@InterfaceAudience.Private
public abstract class StringParam extends Param<String> {
private Pattern pattern;
public StringParam(String name, String defaultValue) {
this(name, defaultValue, null);
}
public StringParam(String name, String defaultValue, Pattern pattern) {
super(name, defaultValue);
this.pattern = pattern;
parseParam(defaultValue);
}
@Override
public String parseParam(String str) {
try {
if (str != null) {
str = str.trim();
if (str.length() > 0) {
value = parse(str);
}
}
} catch (Exception ex) {
throw new IllegalArgumentException(
MessageFormat.format("Parameter [{0}], invalid value [{1}], value must be [{2}]",
getName(), str, getDomain()));
}
return value;
}
@Override
protected String parse(String str) throws Exception {
if (pattern != null) {
if (!pattern.matcher(str).matches()) {
throw new IllegalArgumentException("Invalid value");
}
}
return str;
}
@Override
protected String getDomain() {
return (pattern == null) ? "a string" : pattern.pattern();
}
}
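// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): a hypothetical
// pattern-constrained parameter. The parameter name and regex are invented;
// values that do not match the pattern make parseParam() throw an
// IllegalArgumentException whose message quotes the pattern as the domain.
// ---------------------------------------------------------------------------
import java.util.regex.Pattern;

class GroupNameParamSketch extends StringParam {
  static final String NAME = "group";

  public GroupNameParamSketch() {
    // A null default is allowed; the pattern is only checked for non-empty values.
    super(NAME, null, Pattern.compile("[a-z_][a-z0-9_]*"));
  }
}
// Example: new GroupNameParamSketch().parseParam("admin") returns "admin",
// while parseParam("Admin Group") fails validation.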
| 2,142 | 29.183099 | 89 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.apache.hadoop.classification.InterfaceAudience;
import java.text.MessageFormat;
@InterfaceAudience.Private
public abstract class Param<T> {
private String name;
protected T value;
public Param(String name, T defaultValue) {
this.name = name;
this.value = defaultValue;
}
public String getName() {
return name;
}
public T parseParam(String str) {
try {
value = (str != null && str.trim().length() > 0) ? parse(str) : value;
} catch (Exception ex) {
throw new IllegalArgumentException(
MessageFormat.format("Parameter [{0}], invalid value [{1}], value must be [{2}]",
name, str, getDomain()));
}
return value;
}
public T value() {
return value;
}
protected abstract String getDomain();
protected abstract T parse(String str) throws Exception;
@Override
public String toString() {
return (value != null) ? value.toString() : "NULL";
}
}
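// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): the minimal contract a
// concrete Param has to satisfy: parse() converts the raw string and
// getDomain() describes legal values for error messages. BooleanParamSketch
// is invented for illustration.
// ---------------------------------------------------------------------------
class BooleanParamSketch extends Param<Boolean> {

  public BooleanParamSketch(String name, Boolean defaultValue) {
    super(name, defaultValue);
  }

  @Override
  protected Boolean parse(String str) throws Exception {
    if (str.equalsIgnoreCase("true") || str.equalsIgnoreCase("false")) {
      return Boolean.parseBoolean(str);
    }
    throw new IllegalArgumentException("not a boolean");
  }

  @Override
  protected String getDomain() {
    return "a boolean (true or false)";
  }
}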
| 1,815 | 27.375 | 89 | java |
hadoop | hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.junit.Ignore;
public class TestNativeAzureFileSystemContractMocked extends
FileSystemContractBaseTest {
@Override
protected void setUp() throws Exception {
fs = AzureBlobStorageTestAccount.createMock().getFileSystem();
}
/**
* The following tests are failing on Azure and the Azure
* file system code needs to be modified to make them pass.
* A separate work item has been opened for this.
*/
@Ignore
public void testMoveFileUnderParent() throws Throwable {
}
@Ignore
public void testRenameFileToSelf() throws Throwable {
}
@Ignore
public void testRenameChildDirForbidden() throws Exception {
}
@Ignore
public void testMoveDirUnderParent() throws Throwable {
}
@Ignore
public void testRenameDirToSelf() throws Throwable {
}
}
| 1,712 | 29.052632 | 75 | java |
hadoop | hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.junit.Ignore;
public class TestNativeAzureFileSystemContractLive extends
FileSystemContractBaseTest {
private AzureBlobStorageTestAccount testAccount;
@Override
protected void setUp() throws Exception {
testAccount = AzureBlobStorageTestAccount.create();
if (testAccount != null) {
fs = testAccount.getFileSystem();
}
}
@Override
protected void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
fs = null;
}
}
@Override
protected void runTest() throws Throwable {
if (testAccount != null) {
super.runTest();
}
}
/**
* The following tests are failing on Azure and the Azure
* file system code needs to be modified to make them pass.
* A separate work item has been opened for this.
*/
@Ignore
public void testMoveFileUnderParent() throws Throwable {
}
@Ignore
public void testRenameFileToSelf() throws Throwable {
}
@Ignore
public void testRenameChildDirForbidden() throws Exception {
}
@Ignore
public void testMoveDirUnderParent() throws Throwable {
}
@Ignore
public void testRenameDirToSelf() throws Throwable {
}
}
| 2,123 | 26.584416 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockLocations.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
public class TestNativeAzureFileSystemBlockLocations {
@Test
public void testNumberOfBlocks() throws Exception {
Configuration conf = new Configuration();
conf.set(NativeAzureFileSystem.AZURE_BLOCK_SIZE_PROPERTY_NAME, "500");
AzureBlobStorageTestAccount testAccount = AzureBlobStorageTestAccount
.createMock(conf);
FileSystem fs = testAccount.getFileSystem();
Path testFile = createTestFile(fs, 1200);
FileStatus stat = fs.getFileStatus(testFile);
assertEquals(500, stat.getBlockSize());
testAccount.cleanup();
}
@Test
public void testBlockLocationsTypical() throws Exception {
BlockLocation[] locations = getBlockLocationsOutput(210, 50, 0, 210);
assertEquals(5, locations.length);
assertEquals("localhost", locations[0].getHosts()[0]);
assertEquals(50, locations[0].getLength());
assertEquals(10, locations[4].getLength());
assertEquals(100, locations[2].getOffset());
}
@Test
public void testBlockLocationsEmptyFile() throws Exception {
BlockLocation[] locations = getBlockLocationsOutput(0, 50, 0, 0);
assertEquals(0, locations.length);
}
@Test
public void testBlockLocationsSmallFile() throws Exception {
BlockLocation[] locations = getBlockLocationsOutput(1, 50, 0, 1);
assertEquals(1, locations.length);
assertEquals(1, locations[0].getLength());
}
@Test
public void testBlockLocationsExactBlockSizeMultiple() throws Exception {
BlockLocation[] locations = getBlockLocationsOutput(200, 50, 0, 200);
assertEquals(4, locations.length);
assertEquals(150, locations[3].getOffset());
assertEquals(50, locations[3].getLength());
}
@Test
public void testBlockLocationsSubsetOfFile() throws Exception {
BlockLocation[] locations = getBlockLocationsOutput(205, 10, 15, 35);
assertEquals(4, locations.length);
assertEquals(10, locations[0].getLength());
assertEquals(15, locations[0].getOffset());
assertEquals(5, locations[3].getLength());
assertEquals(45, locations[3].getOffset());
}
@Test
public void testBlockLocationsOutOfRangeSubsetOfFile() throws Exception {
BlockLocation[] locations = getBlockLocationsOutput(205, 10, 300, 10);
assertEquals(0, locations.length);
}
@Test
public void testBlockLocationsEmptySubsetOfFile() throws Exception {
BlockLocation[] locations = getBlockLocationsOutput(205, 10, 0, 0);
assertEquals(0, locations.length);
}
@Test
public void testBlockLocationsDifferentLocationHost() throws Exception {
BlockLocation[] locations = getBlockLocationsOutput(100, 10, 0, 100,
"myblobhost");
assertEquals(10, locations.length);
assertEquals("myblobhost", locations[0].getHosts()[0]);
}
private static BlockLocation[] getBlockLocationsOutput(int fileSize,
int blockSize, long start, long len) throws Exception {
return getBlockLocationsOutput(fileSize, blockSize, start, len, null);
}
private static BlockLocation[] getBlockLocationsOutput(int fileSize,
int blockSize, long start, long len, String blockLocationHost)
throws Exception {
Configuration conf = new Configuration();
conf.set(NativeAzureFileSystem.AZURE_BLOCK_SIZE_PROPERTY_NAME, ""
+ blockSize);
if (blockLocationHost != null) {
conf.set(NativeAzureFileSystem.AZURE_BLOCK_LOCATION_HOST_PROPERTY_NAME,
blockLocationHost);
}
AzureBlobStorageTestAccount testAccount = AzureBlobStorageTestAccount
.createMock(conf);
FileSystem fs = testAccount.getFileSystem();
Path testFile = createTestFile(fs, fileSize);
FileStatus stat = fs.getFileStatus(testFile);
BlockLocation[] locations = fs.getFileBlockLocations(stat, start, len);
testAccount.cleanup();
return locations;
}
private static Path createTestFile(FileSystem fs, int size) throws Exception {
Path testFile = new Path("/testFile");
OutputStream outputStream = fs.create(testFile);
outputStream.write(new byte[size]);
outputStream.close();
return testFile;
}
}
| 5,196 | 36.121429 | 80 | java |
hadoop | hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeNotNull;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URI;
import java.util.Arrays;
import java.util.HashMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext;
import org.junit.Test;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.SendingRequestEvent;
import com.microsoft.azure.storage.StorageEvent;
public class TestAzureFileSystemErrorConditions {
private static final int ALL_THREE_FILE_SIZE = 1024;
@Test
public void testNoInitialize() throws Exception {
AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
boolean passed = false;
try {
store.retrieveMetadata("foo");
passed = true;
} catch (AssertionError e) {
}
assertFalse(
"Doing an operation on the store should throw if not initalized.",
passed);
}
/**
* Try accessing an unauthorized or non-existent (treated the same) container
* from WASB.
*/
@Test
public void testAccessUnauthorizedPublicContainer() throws Exception {
Path noAccessPath = new Path(
"wasb://nonExistentContainer@hopefullyNonExistentAccount/someFile");
NativeAzureFileSystem.suppressRetryPolicy();
try {
FileSystem.get(noAccessPath.toUri(), new Configuration())
.open(noAccessPath);
assertTrue("Should've thrown.", false);
} catch (AzureException ex) {
assertTrue("Unexpected message in exception " + ex,
ex.getMessage().contains(
"Unable to access container nonExistentContainer in account" +
" hopefullyNonExistentAccount"));
} finally {
NativeAzureFileSystem.resumeRetryPolicy();
}
}
@Test
public void testAccessContainerWithWrongVersion() throws Exception {
AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
MockStorageInterface mockStorage = new MockStorageInterface();
store.setAzureStorageInteractionLayer(mockStorage);
FileSystem fs = new NativeAzureFileSystem(store);
try {
Configuration conf = new Configuration();
AzureBlobStorageTestAccount.setMockAccountKey(conf);
HashMap<String, String> metadata = new HashMap<String, String>();
metadata.put(AzureNativeFileSystemStore.VERSION_METADATA_KEY,
"2090-04-05"); // It's from the future!
mockStorage.addPreExistingContainer(
AzureBlobStorageTestAccount.getMockContainerUri(), metadata);
boolean passed = false;
try {
fs.initialize(new URI(AzureBlobStorageTestAccount.MOCK_WASB_URI), conf);
fs.listStatus(new Path("/"));
passed = true;
} catch (AzureException ex) {
assertTrue("Unexpected exception message: " + ex,
ex.getMessage().contains("unsupported version: 2090-04-05."));
}
assertFalse("Should've thrown an exception because of the wrong version.",
passed);
} finally {
fs.close();
}
}
private interface ConnectionRecognizer {
boolean isTargetConnection(HttpURLConnection connection);
}
private class TransientErrorInjector extends StorageEvent<SendingRequestEvent> {
final ConnectionRecognizer connectionRecognizer;
private boolean injectedErrorOnce = false;
public TransientErrorInjector(ConnectionRecognizer connectionRecognizer) {
this.connectionRecognizer = connectionRecognizer;
}
@Override
public void eventOccurred(SendingRequestEvent eventArg) {
HttpURLConnection connection = (HttpURLConnection)eventArg.getConnectionObject();
if (!connectionRecognizer.isTargetConnection(connection)) {
return;
}
if (!injectedErrorOnce) {
connection.setReadTimeout(1);
connection.disconnect();
injectedErrorOnce = true;
}
}
}
private void injectTransientError(NativeAzureFileSystem fs,
final ConnectionRecognizer connectionRecognizer) {
fs.getStore().addTestHookToOperationContext(new TestHookOperationContext() {
@Override
public OperationContext modifyOperationContext(OperationContext original) {
original.getSendingRequestEventHandler().addListener(
new TransientErrorInjector(connectionRecognizer));
return original;
}
});
}
@Test
public void testTransientErrorOnDelete() throws Exception {
// Need to do this test against a live storage account
AzureBlobStorageTestAccount testAccount =
AzureBlobStorageTestAccount.create();
assumeNotNull(testAccount);
try {
NativeAzureFileSystem fs = testAccount.getFileSystem();
injectTransientError(fs, new ConnectionRecognizer() {
@Override
public boolean isTargetConnection(HttpURLConnection connection) {
return connection.getRequestMethod().equals("DELETE");
}
});
Path testFile = new Path("/a/b");
assertTrue(fs.createNewFile(testFile));
assertTrue(fs.rename(testFile, new Path("/x")));
} finally {
testAccount.cleanup();
}
}
private void writeAllThreeFile(NativeAzureFileSystem fs, Path testFile)
throws IOException {
byte[] buffer = new byte[ALL_THREE_FILE_SIZE];
Arrays.fill(buffer, (byte)3);
OutputStream stream = fs.create(testFile);
stream.write(buffer);
stream.close();
}
private void readAllThreeFile(NativeAzureFileSystem fs, Path testFile)
throws IOException {
byte[] buffer = new byte[ALL_THREE_FILE_SIZE];
InputStream inStream = fs.open(testFile);
assertEquals(buffer.length,
inStream.read(buffer, 0, buffer.length));
inStream.close();
for (int i = 0; i < buffer.length; i++) {
assertEquals(3, buffer[i]);
}
}
@Test
public void testTransientErrorOnCommitBlockList() throws Exception {
// Need to do this test against a live storage account
AzureBlobStorageTestAccount testAccount =
AzureBlobStorageTestAccount.create();
assumeNotNull(testAccount);
try {
NativeAzureFileSystem fs = testAccount.getFileSystem();
injectTransientError(fs, new ConnectionRecognizer() {
@Override
public boolean isTargetConnection(HttpURLConnection connection) {
return connection.getRequestMethod().equals("PUT")
&& connection.getURL().getQuery() != null
&& connection.getURL().getQuery().contains("blocklist");
}
});
Path testFile = new Path("/a/b");
writeAllThreeFile(fs, testFile);
readAllThreeFile(fs, testFile);
} finally {
testAccount.cleanup();
}
}
@Test
public void testTransientErrorOnRead() throws Exception {
// Need to do this test against a live storage account
AzureBlobStorageTestAccount testAccount =
AzureBlobStorageTestAccount.create();
assumeNotNull(testAccount);
try {
NativeAzureFileSystem fs = testAccount.getFileSystem();
Path testFile = new Path("/a/b");
writeAllThreeFile(fs, testFile);
injectTransientError(fs, new ConnectionRecognizer() {
@Override
public boolean isTargetConnection(HttpURLConnection connection) {
return connection.getRequestMethod().equals("GET");
}
});
readAllThreeFile(fs, testFile);
} finally {
testAccount.cleanup();
}
}
}
| 8,519 | 34.206612 | 87 | java |
hadoop | hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
public class TestWasbFsck {
private AzureBlobStorageTestAccount testAccount;
private FileSystem fs;
private InMemoryBlockBlobStore backingStore;
@Before
public void setUp() throws Exception {
testAccount = AzureBlobStorageTestAccount.createMock();
fs = testAccount.getFileSystem();
backingStore = testAccount.getMockStorage().getBackingStore();
}
@After
public void tearDown() throws Exception {
testAccount.cleanup();
fs = null;
backingStore = null;
}
/**
* Counts the number of temporary blobs in the backing store.
*/
private int getNumTempBlobs() {
int count = 0;
for (String key : backingStore.getKeys()) {
if (key.contains(NativeAzureFileSystem.AZURE_TEMP_FOLDER)) {
count++;
}
}
return count;
}
/**
* Tests that we recover files properly
*/
@Test
@Ignore /* flush() no longer does anything @@TODO: reinstate an appropriate test of fsck recovery*/
public void testRecover() throws Exception {
Path danglingFile = new Path("/crashedInTheMiddle");
// Create a file and leave it dangling and try to recover it.
FSDataOutputStream stream = fs.create(danglingFile);
stream.write(new byte[] { 1, 2, 3 });
stream.flush();
// Now we should still only see a zero-byte file in this place
FileStatus fileStatus = fs.getFileStatus(danglingFile);
assertNotNull(fileStatus);
assertEquals(0, fileStatus.getLen());
assertEquals(1, getNumTempBlobs());
// Run WasbFsck -move to recover the file.
runFsck("-move");
    // Now we should see the file in lost+found with the data there.
fileStatus = fs.getFileStatus(new Path("/lost+found",
danglingFile.getName()));
assertNotNull(fileStatus);
assertEquals(3, fileStatus.getLen());
assertEquals(0, getNumTempBlobs());
// But not in its original location
assertFalse(fs.exists(danglingFile));
}
private void runFsck(String command) throws Exception {
Configuration conf = fs.getConf();
// Set the dangling cutoff to zero, so every temp blob is considered
// dangling.
conf.setInt(NativeAzureFileSystem.AZURE_TEMP_EXPIRY_PROPERTY_NAME, 0);
WasbFsck fsck = new WasbFsck(conf);
fsck.setMockFileSystemForTesting(fs);
fsck.run(new String[] { AzureBlobStorageTestAccount.MOCK_WASB_URI, command });
}
/**
* Tests that we delete dangling files properly
*/
@Test
public void testDelete() throws Exception {
Path danglingFile = new Path("/crashedInTheMiddle");
// Create a file and leave it dangling and try to delete it.
FSDataOutputStream stream = fs.create(danglingFile);
stream.write(new byte[] { 1, 2, 3 });
stream.flush();
// Now we should still only see a zero-byte file in this place
FileStatus fileStatus = fs.getFileStatus(danglingFile);
assertNotNull(fileStatus);
assertEquals(0, fileStatus.getLen());
assertEquals(1, getNumTempBlobs());
// Run WasbFsck -delete to delete the file.
runFsck("-delete");
// Now we should see no trace of the file.
assertEquals(0, getNumTempBlobs());
assertFalse(fs.exists(danglingFile));
}
}
| 4,455 | 32.007407 | 103 | java |
hadoop | hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import java.io.File;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell;
import org.junit.Assert;
import org.junit.Test;
public class TestShellDecryptionKeyProvider {
public static final Log LOG = LogFactory
.getLog(TestShellDecryptionKeyProvider.class);
private static File TEST_ROOT_DIR = new File(System.getProperty(
"test.build.data", "/tmp"), "TestShellDecryptionKeyProvider");
@Test
public void testScriptPathNotSpecified() throws Exception {
if (!Shell.WINDOWS) {
return;
}
ShellDecryptionKeyProvider provider = new ShellDecryptionKeyProvider();
Configuration conf = new Configuration();
String account = "testacct";
String key = "key";
conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
try {
provider.getStorageAccountKey(account, conf);
Assert
.fail("fs.azure.shellkeyprovider.script is not specified, we should throw");
} catch (KeyProviderException e) {
LOG.info("Received an expected exception: " + e.getMessage());
}
}
@Test
public void testValidScript() throws Exception {
if (!Shell.WINDOWS) {
return;
}
String expectedResult = "decretedKey";
// Create a simple script which echoes the given key plus the given
// expected result (so that we validate both script input and output)
File scriptFile = new File(TEST_ROOT_DIR, "testScript.cmd");
FileUtils.writeStringToFile(scriptFile, "@echo %1 " + expectedResult);
ShellDecryptionKeyProvider provider = new ShellDecryptionKeyProvider();
Configuration conf = new Configuration();
String account = "testacct";
String key = "key1";
conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
conf.set(ShellDecryptionKeyProvider.KEY_ACCOUNT_SHELLKEYPROVIDER_SCRIPT,
"cmd /c " + scriptFile.getAbsolutePath());
String result = provider.getStorageAccountKey(account, conf);
assertEquals(key + " " + expectedResult, result);
}
}
| 3,038 | 35.614458 | 86 | java |
hadoop | hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeNotNull;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.microsoft.azure.storage.blob.BlobOutputStream;
import com.microsoft.azure.storage.blob.CloudBlockBlob;
public class TestOutOfBandAzureBlobOperationsLive {
private FileSystem fs;
private AzureBlobStorageTestAccount testAccount;
@Before
public void setUp() throws Exception {
testAccount = AzureBlobStorageTestAccount.create();
if (testAccount != null) {
fs = testAccount.getFileSystem();
}
assumeNotNull(testAccount);
}
@After
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
fs = null;
}
}
// scenario for this particular test described at MONARCH-HADOOP-764
// creating a file out-of-band would confuse mkdirs("<oobfilesUncleFolder>")
// eg oob creation of "user/<name>/testFolder/a/input/file"
// Then wasb creation of "user/<name>/testFolder/a/output" fails
@Test
public void outOfBandFolder_uncleMkdirs() throws Exception {
    // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
// WASB driver methods prepend working directory implicitly.
String workingDir = "user/"
+ UserGroupInformation.getCurrentUser().getShortUserName() + "/";
CloudBlockBlob blob = testAccount.getBlobReference(workingDir
+ "testFolder1/a/input/file");
BlobOutputStream s = blob.openOutputStream();
s.close();
assertTrue(fs.exists(new Path("testFolder1/a/input/file")));
Path targetFolder = new Path("testFolder1/a/output");
assertTrue(fs.mkdirs(targetFolder));
}
// scenario for this particular test described at MONARCH-HADOOP-764
@Test
public void outOfBandFolder_parentDelete() throws Exception {
    // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
// WASB driver methods prepend working directory implicitly.
String workingDir = "user/"
+ UserGroupInformation.getCurrentUser().getShortUserName() + "/";
CloudBlockBlob blob = testAccount.getBlobReference(workingDir
+ "testFolder2/a/input/file");
BlobOutputStream s = blob.openOutputStream();
s.close();
assertTrue(fs.exists(new Path("testFolder2/a/input/file")));
Path targetFolder = new Path("testFolder2/a/input");
assertTrue(fs.delete(targetFolder, true));
}
@Test
public void outOfBandFolder_rootFileDelete() throws Exception {
CloudBlockBlob blob = testAccount.getBlobReference("fileY");
BlobOutputStream s = blob.openOutputStream();
s.close();
assertTrue(fs.exists(new Path("/fileY")));
assertTrue(fs.delete(new Path("/fileY"), true));
}
@Test
public void outOfBandFolder_firstLevelFolderDelete() throws Exception {
CloudBlockBlob blob = testAccount.getBlobReference("folderW/file");
BlobOutputStream s = blob.openOutputStream();
s.close();
assertTrue(fs.exists(new Path("/folderW")));
assertTrue(fs.exists(new Path("/folderW/file")));
assertTrue(fs.delete(new Path("/folderW"), true));
}
// scenario for this particular test described at MONARCH-HADOOP-764
@Test
public void outOfBandFolder_siblingCreate() throws Exception {
    // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
// WASB driver methods prepend working directory implicitly.
String workingDir = "user/"
+ UserGroupInformation.getCurrentUser().getShortUserName() + "/";
CloudBlockBlob blob = testAccount.getBlobReference(workingDir
+ "testFolder3/a/input/file");
BlobOutputStream s = blob.openOutputStream();
s.close();
assertTrue(fs.exists(new Path("testFolder3/a/input/file")));
Path targetFile = new Path("testFolder3/a/input/file2");
FSDataOutputStream s2 = fs.create(targetFile);
s2.close();
}
// scenario for this particular test described at MONARCH-HADOOP-764
// creating a new file in the root folder
@Test
public void outOfBandFolder_create_rootDir() throws Exception {
Path targetFile = new Path("/newInRoot");
FSDataOutputStream s2 = fs.create(targetFile);
s2.close();
}
// scenario for this particular test described at MONARCH-HADOOP-764
@Test
public void outOfBandFolder_rename() throws Exception {
    // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
// WASB driver methods prepend working directory implicitly.
String workingDir = "user/"
+ UserGroupInformation.getCurrentUser().getShortUserName() + "/";
CloudBlockBlob blob = testAccount.getBlobReference(workingDir
+ "testFolder4/a/input/file");
BlobOutputStream s = blob.openOutputStream();
s.close();
Path srcFilePath = new Path("testFolder4/a/input/file");
assertTrue(fs.exists(srcFilePath));
Path destFilePath = new Path("testFolder4/a/input/file2");
fs.rename(srcFilePath, destFilePath);
}
// Verify that you can rename a file which is the only file in an implicit folder in the
// WASB file system.
// scenario for this particular test described at MONARCH-HADOOP-892
@Test
public void outOfBandSingleFile_rename() throws Exception {
    // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
// WASB driver methods prepend working directory implicitly.
String workingDir = "user/" + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
CloudBlockBlob blob = testAccount.getBlobReference(workingDir + "testFolder5/a/input/file");
BlobOutputStream s = blob.openOutputStream();
s.close();
Path srcFilePath = new Path("testFolder5/a/input/file");
assertTrue(fs.exists(srcFilePath));
Path destFilePath = new Path("testFolder5/file2");
fs.rename(srcFilePath, destFilePath);
}
// WASB must force explicit parent directories in create, delete, mkdirs, rename.
// scenario for this particular test described at MONARCH-HADOOP-764
@Test
public void outOfBandFolder_rename_rootLevelFiles() throws Exception {
    // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
// WASB driver methods prepend working directory implicitly.
CloudBlockBlob blob = testAccount.getBlobReference("fileX");
BlobOutputStream s = blob.openOutputStream();
s.close();
Path srcFilePath = new Path("/fileX");
assertTrue(fs.exists(srcFilePath));
Path destFilePath = new Path("/fileXrename");
fs.rename(srcFilePath, destFilePath);
}
}
| 7,604 | 36.279412 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/InMemoryBlockBlobStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
/**
* A simple memory key-value store to help mock the Windows Azure Storage
* implementation for unit testing.
*/
public class InMemoryBlockBlobStore {
private final HashMap<String, Entry> blobs = new HashMap<String, Entry>();
private HashMap<String, String> containerMetadata;
public synchronized Iterable<String> getKeys() {
return new ArrayList<String>(blobs.keySet());
}
public static class ListBlobEntry {
private final String key;
private final HashMap<String, String> metadata;
private final int contentLength;
private final boolean isPageBlob;
ListBlobEntry(String key, HashMap<String, String> metadata,
int contentLength, boolean isPageBlob) {
this.key = key;
this.metadata = metadata;
this.contentLength = contentLength;
this.isPageBlob = isPageBlob;
}
public String getKey() {
return key;
}
public HashMap<String, String> getMetadata() {
return metadata;
}
public int getContentLength() {
return contentLength;
}
public boolean isPageBlob() {
return isPageBlob;
}
}
/**
* List all the blobs whose key starts with the given prefix.
*
* @param prefix
* The prefix to check.
* @param includeMetadata
* If set, the metadata in the returned listing will be populated;
* otherwise it'll be null.
* @return The listing.
*/
public synchronized Iterable<ListBlobEntry> listBlobs(String prefix,
boolean includeMetadata) {
ArrayList<ListBlobEntry> list = new ArrayList<ListBlobEntry>();
for (Map.Entry<String, Entry> entry : blobs.entrySet()) {
if (entry.getKey().startsWith(prefix)) {
list.add(new ListBlobEntry(
entry.getKey(),
includeMetadata ?
new HashMap<String, String>(entry.getValue().metadata) :
null,
entry.getValue().content.length,
entry.getValue().isPageBlob));
}
}
return list;
}
public synchronized byte[] getContent(String key) {
return blobs.get(key).content;
}
@SuppressWarnings("unchecked")
public synchronized void setContent(String key, byte[] value,
HashMap<String, String> metadata, boolean isPageBlob,
long length) {
blobs.put(key, new Entry(value, (HashMap<String, String>)metadata.clone(),
isPageBlob, length));
}
@SuppressWarnings("unchecked")
public synchronized void setMetadata(String key,
HashMap<String, String> metadata) {
blobs.get(key).metadata = (HashMap<String, String>) metadata.clone();
}
public OutputStream uploadBlockBlob(final String key,
final HashMap<String, String> metadata) {
setContent(key, new byte[0], metadata, false, 0);
return new ByteArrayOutputStream() {
@Override
public void flush()
throws IOException {
super.flush();
byte[] tempBytes = toByteArray();
setContent(key, tempBytes, metadata, false, tempBytes.length);
}
@Override
public void close()
throws IOException {
super.close();
byte[] tempBytes = toByteArray();
setContent(key, tempBytes, metadata, false, tempBytes.length);
}
};
}
public OutputStream uploadPageBlob(final String key,
final HashMap<String, String> metadata,
final long length) {
setContent(key, new byte[0], metadata, true, length);
return new ByteArrayOutputStream() {
@Override
public void flush()
throws IOException {
super.flush();
setContent(key, toByteArray(), metadata, true, length);
}
};
}
public synchronized void copy(String sourceKey, String destKey) {
blobs.put(destKey, blobs.get(sourceKey));
}
public synchronized void delete(String key) {
blobs.remove(key);
}
public synchronized boolean exists(String key) {
return blobs.containsKey(key);
}
@SuppressWarnings("unchecked")
public synchronized HashMap<String, String> getMetadata(String key) {
return (HashMap<String, String>) blobs.get(key).metadata.clone();
}
public synchronized HashMap<String, String> getContainerMetadata() {
return containerMetadata;
}
public synchronized void setContainerMetadata(HashMap<String, String> metadata) {
containerMetadata = metadata;
}
private static class Entry {
private byte[] content;
private HashMap<String, String> metadata;
private boolean isPageBlob;
@SuppressWarnings("unused") // TODO: use it
private long length;
public Entry(byte[] content, HashMap<String, String> metadata,
boolean isPageBlob, long length) {
this.content = content;
this.metadata = metadata;
this.isPageBlob = isPageBlob;
this.length = length;
}
}
}
| 5,839 | 29.259067 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.net.URI;
import java.util.StringTokenizer;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.log4j.Logger;
import org.junit.Test;
/**
 * Test to validate Azure storage client-side logging. Tests work only when
* testing with Live Azure storage because Emulator does not have support for
* client-side logging.
*
*/
public class TestNativeAzureFileSystemClientLogging extends
NativeAzureFileSystemBaseTest {
private AzureBlobStorageTestAccount testAccount;
// Core-site config controlling Azure Storage Client logging
private static final String KEY_LOGGING_CONF_STRING = "fs.azure.storage.client.logging";
// Temporary directory created using WASB.
private static final String TEMP_DIR = "tempDir";
/*
 * Helper method to verify that client logging is working. It primarily checks
 * that we see a line in the logs corresponding to the entity that is created
 * during the test run.
*/
private boolean verifyStorageClientLogs(String capturedLogs, String entity)
throws Exception {
URI uri = testAccount.getRealAccount().getBlobEndpoint();
String container = testAccount.getRealContainer().getName();
String validateString = uri + Path.SEPARATOR + container + Path.SEPARATOR
+ entity;
boolean entityFound = false;
StringTokenizer tokenizer = new StringTokenizer(capturedLogs, "\n");
while (tokenizer.hasMoreTokens()) {
String token = tokenizer.nextToken();
if (token.contains(validateString)) {
entityFound = true;
break;
}
}
return entityFound;
}
/*
* Helper method that updates the core-site config to enable/disable logging.
*/
private void updateFileSystemConfiguration(Boolean loggingFlag)
throws Exception {
Configuration conf = fs.getConf();
conf.set(KEY_LOGGING_CONF_STRING, loggingFlag.toString());
URI uri = fs.getUri();
fs.initialize(uri, conf);
}
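  // Sketch (hypothetical helper, not used by the tests): the flag can be read
  // back from the live file system configuration to confirm that the update
  // above took effect before any WASB operations run.
  private boolean isClientLoggingEnabled() {
    return fs.getConf().getBoolean(KEY_LOGGING_CONF_STRING, false);
  }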
// Using WASB code to communicate with Azure Storage.
private void performWASBOperations() throws Exception {
Path tempDir = new Path(Path.SEPARATOR + TEMP_DIR);
fs.mkdirs(tempDir);
fs.delete(tempDir, true);
}
@Test
public void testLoggingEnabled() throws Exception {
LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
.getRootLogger()));
// Update configuration based on the Test.
updateFileSystemConfiguration(true);
performWASBOperations();
assertTrue(verifyStorageClientLogs(logs.getOutput(), TEMP_DIR));
}
@Test
public void testLoggingDisabled() throws Exception {
LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
.getRootLogger()));
// Update configuration based on the Test.
updateFileSystemConfiguration(false);
performWASBOperations();
assertFalse(verifyStorageClientLogs(logs.getOutput(), TEMP_DIR));
}
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
testAccount = AzureBlobStorageTestAccount.create();
return testAccount;
}
}
| 4,152 | 30.946154 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemOperationsMocked.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.junit.Assume.assumeTrue;
import org.apache.hadoop.fs.FSMainOperationsBaseTest;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
public class TestNativeAzureFileSystemOperationsMocked extends
FSMainOperationsBaseTest {
private static final String TEST_ROOT_DIR =
"/tmp/TestNativeAzureFileSystemOperationsMocked";
  public TestNativeAzureFileSystemOperationsMocked() {
super(TEST_ROOT_DIR);
}
@Override
public void setUp() throws Exception {
fSys = AzureBlobStorageTestAccount.createMock().getFileSystem();
}
@Override
protected FileSystem createFileSystem() throws Exception {
return AzureBlobStorageTestAccount.createMock().getFileSystem();
}
public void testListStatusThrowsExceptionForUnreadableDir() throws Exception {
System.out
.println("Skipping testListStatusThrowsExceptionForUnreadableDir since WASB"
+ " doesn't honor directory permissions.");
assumeTrue(!Path.WINDOWS);
}
@Override
public String getTestRootDir() {
return TEST_ROOT_DIR;
}
@Override
public Path getTestRootPath(FileSystem fSys) {
return fSys.makeQualified(new Path(TEST_ROOT_DIR));
}
@Override
public Path getTestRootPath(FileSystem fSys, String pathString) {
return fSys.makeQualified(new Path(TEST_ROOT_DIR, pathString));
}
@Override
public Path getAbsoluteTestRootPath(FileSystem fSys) {
Path testRootPath = new Path(TEST_ROOT_DIR);
if (testRootPath.isAbsolute()) {
return testRootPath;
} else {
return new Path(fSys.getWorkingDirectory(), TEST_ROOT_DIR);
}
}
}
| 2,477 | 30.367089 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
/*
* Tests the Native Azure file system (WASB) against an actual blob store if
* provided in the environment.
*/
public class TestNativeAzureFileSystemLive extends
NativeAzureFileSystemBaseTest {
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.create();
}
/**
* Check that isPageBlobKey works as expected. This assumes that
* in the test configuration, the list of supported page blob directories
* only includes "pageBlobs". That's why this test is made specific
* to this subclass.
*/
@Test
public void testIsPageBlobKey() {
AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore();
// Use literal strings so it's easier to understand the tests.
// In case the constant changes, we want to know about it so we can update this test.
assertEquals(AzureBlobStorageTestAccount.DEFAULT_PAGE_BLOB_DIRECTORY, "pageBlobs");
// URI prefix for test environment.
String uriPrefix = "file:///";
// negative tests
String[] negativeKeys = { "", "/", "bar", "bar/", "bar/pageBlobs", "bar/pageBlobs/foo",
"bar/pageBlobs/foo/", "/pageBlobs/", "/pageBlobs", "pageBlobs", "pageBlobsxyz/" };
for (String s : negativeKeys) {
assertFalse(store.isPageBlobKey(s));
assertFalse(store.isPageBlobKey(uriPrefix + s));
}
// positive tests
String[] positiveKeys = { "pageBlobs/", "pageBlobs/foo/", "pageBlobs/foo/bar/" };
for (String s : positiveKeys) {
assertTrue(store.isPageBlobKey(s));
assertTrue(store.isPageBlobKey(uriPrefix + s));
}
}
/**
* Test that isAtomicRenameKey() works as expected.
*/
@Test
public void testIsAtomicRenameKey() {
AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore();
// We want to know if the default configuration changes so we can fix
// this test.
assertEquals(AzureBlobStorageTestAccount.DEFAULT_ATOMIC_RENAME_DIRECTORIES,
"/atomicRenameDir1,/atomicRenameDir2");
// URI prefix for test environment.
String uriPrefix = "file:///";
// negative tests
String[] negativeKeys = { "", "/", "bar", "bar/", "bar/hbase",
"bar/hbase/foo", "bar/hbase/foo/", "/hbase/", "/hbase", "hbase",
"hbasexyz/", "foo/atomicRenameDir1/"};
for (String s : negativeKeys) {
assertFalse(store.isAtomicRenameKey(s));
assertFalse(store.isAtomicRenameKey(uriPrefix + s));
}
// Positive tests. The directories for atomic rename are /hbase
// plus the ones in the configuration (DEFAULT_ATOMIC_RENAME_DIRECTORIES
// for this test).
String[] positiveKeys = { "hbase/", "hbase/foo/", "hbase/foo/bar/",
"atomicRenameDir1/foo/", "atomicRenameDir2/bar/"};
for (String s : positiveKeys) {
assertTrue(store.isAtomicRenameKey(s));
assertTrue(store.isAtomicRenameKey(uriPrefix + s));
}
}
}
| 3,945 | 35.537037 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSPageBlobLive.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import org.apache.hadoop.conf.Configuration;
/**
* Run the base Azure file system tests strictly on page blobs to make sure fundamental
* operations on page blob files and folders work as expected.
* These operations include create, delete, rename, list, and so on.
*/
public class TestNativeAzureFSPageBlobLive extends
NativeAzureFileSystemBaseTest {
@Override
protected AzureBlobStorageTestAccount createTestAccount()
throws Exception {
Configuration conf = new Configuration();
// Configure the page blob directories key so every file created is a page blob.
conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/");
// Configure the atomic rename directories key so every folder will have
// atomic rename applied.
conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES, "/");
return AzureBlobStorageTestAccount.create(conf);
}
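  // Sketch (hypothetical, unused by this test): to exercise page blobs only
  // under a specific directory rather than for every file, the same key could
  // point at that directory instead of "/".
  private static Configuration pageBlobsUnderSingleDirConf() {
    Configuration conf = new Configuration();
    conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/pageBlobs");
    return conf;
  }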
}
| 1,752 | 38.840909 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import java.io.IOException;
import org.junit.Ignore;
public class TestNativeAzureFileSystemMocked extends
NativeAzureFileSystemBaseTest {
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.createMock();
}
// Ignore the following tests because taking a lease requires a real
// (not mock) file system store. These tests don't work on the mock.
@Override
@Ignore
public void testLeaseAsDistributedLock() {
}
@Override
@Ignore
public void testSelfRenewingLease() {
}
@Override
@Ignore
public void testRedoFolderRenameAll() {
}
@Override
@Ignore
public void testCreateNonRecursive() {
}
@Override
@Ignore
public void testSelfRenewingLeaseFileDelete() {
}
@Override
@Ignore
public void testRenameRedoFolderAlreadyDone() throws IOException{
}
}
| 1,729 | 26.03125 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeNotNull;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Date;
import java.util.TimeZone;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.apache.hadoop.fs.azure.AzureException;
import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending;
import com.microsoft.azure.storage.AccessCondition;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlob;
/*
* Tests the Native Azure file system (WASB) against an actual blob store if
* provided in the environment.
 * Subclasses implement createTestAccount() to hit local & mock storage with the same test code.
 *
 * For hand-testing: remove the "abstract" keyword and copy in an implementation of
 * createTestAccount from one of the subclasses (a mock-backed sketch appears below).
*/
public abstract class NativeAzureFileSystemBaseTest {
protected FileSystem fs;
private AzureBlobStorageTestAccount testAccount;
private final long modifiedTimeErrorMargin = 5 * 1000; // Give it +/-5 seconds
protected abstract AzureBlobStorageTestAccount createTestAccount() throws Exception;
public static final Log LOG = LogFactory.getLog(NativeAzureFileSystemBaseTest.class);
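  // Hand-testing sketch (hypothetical helper, not wired into the harness): the
  // class comment above suggests copying in a concrete createTestAccount(); a
  // mock-backed body copied from a subclass would look like this.
  protected AzureBlobStorageTestAccount createMockAccountForHandTesting()
      throws Exception {
    return AzureBlobStorageTestAccount.createMock();
  }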
@Before
public void setUp() throws Exception {
testAccount = createTestAccount();
if (testAccount != null) {
fs = testAccount.getFileSystem();
}
assumeNotNull(testAccount);
}
@After
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
fs = null;
}
}
@Test
public void testCheckingNonExistentOneLetterFile() throws Exception {
assertFalse(fs.exists(new Path("/a")));
}
@Test
public void testStoreRetrieveFile() throws Exception {
Path testFile = new Path("unit-test-file");
writeString(testFile, "Testing");
assertTrue(fs.exists(testFile));
FileStatus status = fs.getFileStatus(testFile);
assertNotNull(status);
    // By default, files should have masked permissions
// that grant RW to user, and R to group/other
assertEquals(new FsPermission((short) 0644), status.getPermission());
assertEquals("Testing", readString(testFile));
fs.delete(testFile, true);
}
@Test
public void testStoreDeleteFolder() throws Exception {
Path testFolder = new Path("storeDeleteFolder");
assertFalse(fs.exists(testFolder));
assertTrue(fs.mkdirs(testFolder));
assertTrue(fs.exists(testFolder));
FileStatus status = fs.getFileStatus(testFolder);
assertNotNull(status);
assertTrue(status.isDirectory());
    // By default, directories should have masked permissions
// that grant RWX to user, and RX to group/other
assertEquals(new FsPermission((short) 0755), status.getPermission());
Path innerFile = new Path(testFolder, "innerFile");
assertTrue(fs.createNewFile(innerFile));
assertTrue(fs.exists(innerFile));
assertTrue(fs.delete(testFolder, true));
assertFalse(fs.exists(innerFile));
assertFalse(fs.exists(testFolder));
}
@Test
public void testFileOwnership() throws Exception {
Path testFile = new Path("ownershipTestFile");
writeString(testFile, "Testing");
testOwnership(testFile);
}
@Test
public void testFolderOwnership() throws Exception {
Path testFolder = new Path("ownershipTestFolder");
fs.mkdirs(testFolder);
testOwnership(testFolder);
}
private void testOwnership(Path pathUnderTest) throws IOException {
FileStatus ret = fs.getFileStatus(pathUnderTest);
UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
assertTrue(ret.getOwner().equals(currentUser.getShortUserName()));
fs.delete(pathUnderTest, true);
}
private static FsPermission ignoreStickyBit(FsPermission original) {
return new FsPermission(original.getUserAction(),
original.getGroupAction(), original.getOtherAction());
}
// When FsPermission applies a UMask, it loses sticky bit information.
// And since we always apply UMask, we should ignore whether the sticky
// bit is equal or not.
private static void assertEqualsIgnoreStickyBit(FsPermission expected,
FsPermission actual) {
assertEquals(ignoreStickyBit(expected), ignoreStickyBit(actual));
}
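  // Illustration (hypothetical helper, not used by the tests): two modes that
  // differ only in the sticky bit, e.g. 01644 and 0644, compare equal once the
  // sticky bit is ignored as above.
  private static boolean equalIgnoringStickyBit(short a, short b) {
    return ignoreStickyBit(new FsPermission(a))
        .equals(ignoreStickyBit(new FsPermission(b)));
  }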
@Test
public void testFilePermissions() throws Exception {
Path testFile = new Path("permissionTestFile");
FsPermission permission = FsPermission.createImmutable((short) 644);
createEmptyFile(testFile, permission);
FileStatus ret = fs.getFileStatus(testFile);
assertEqualsIgnoreStickyBit(permission, ret.getPermission());
fs.delete(testFile, true);
}
@Test
public void testFolderPermissions() throws Exception {
Path testFolder = new Path("permissionTestFolder");
FsPermission permission = FsPermission.createImmutable((short) 644);
fs.mkdirs(testFolder, permission);
FileStatus ret = fs.getFileStatus(testFolder);
assertEqualsIgnoreStickyBit(permission, ret.getPermission());
fs.delete(testFolder, true);
}
void testDeepFileCreationBase(String testFilePath, String firstDirPath, String middleDirPath,
short permissionShort, short umaskedPermissionShort) throws Exception {
Path testFile = new Path(testFilePath);
Path firstDir = new Path(firstDirPath);
Path middleDir = new Path(middleDirPath);
FsPermission permission = FsPermission.createImmutable(permissionShort);
FsPermission umaskedPermission = FsPermission.createImmutable(umaskedPermissionShort);
createEmptyFile(testFile, permission);
FsPermission rootPerm = fs.getFileStatus(firstDir.getParent()).getPermission();
FsPermission inheritPerm = FsPermission.createImmutable((short)(rootPerm.toShort() | 0300));
assertTrue(fs.exists(testFile));
assertTrue(fs.exists(firstDir));
assertTrue(fs.exists(middleDir));
// verify that the indirectly created directory inherited its permissions from the root directory
FileStatus directoryStatus = fs.getFileStatus(middleDir);
assertTrue(directoryStatus.isDirectory());
assertEqualsIgnoreStickyBit(inheritPerm, directoryStatus.getPermission());
// verify that the file itself has the permissions as specified
FileStatus fileStatus = fs.getFileStatus(testFile);
assertFalse(fileStatus.isDirectory());
assertEqualsIgnoreStickyBit(umaskedPermission, fileStatus.getPermission());
assertTrue(fs.delete(firstDir, true));
assertFalse(fs.exists(testFile));
    // An alternative test scenario would've been to delete the file first and
    // then check that the upper folders still exist. But that doesn't actually
    // work as expected right now.
}
@Test
public void testDeepFileCreation() throws Exception {
// normal permissions in user home
testDeepFileCreationBase("deep/file/creation/test", "deep", "deep/file/creation", (short)0644, (short)0644);
// extra permissions in user home. umask will change the actual permissions.
testDeepFileCreationBase("deep/file/creation/test", "deep", "deep/file/creation", (short)0777, (short)0755);
// normal permissions in root
testDeepFileCreationBase("/deep/file/creation/test", "/deep", "/deep/file/creation", (short)0644, (short)0644);
// less permissions in root
testDeepFileCreationBase("/deep/file/creation/test", "/deep", "/deep/file/creation", (short)0700, (short)0700);
// one indirectly created directory in root
testDeepFileCreationBase("/deep/file", "/deep", "/deep", (short)0644, (short)0644);
// one indirectly created directory in user home
testDeepFileCreationBase("deep/file", "deep", "deep", (short)0644, (short)0644);
}
private static enum RenameVariation {
NormalFileName, SourceInAFolder, SourceWithSpace, SourceWithPlusAndPercent
}
@Test
public void testRename() throws Exception {
for (RenameVariation variation : RenameVariation.values()) {
System.out.printf("Rename variation: %s\n", variation);
Path originalFile;
switch (variation) {
case NormalFileName:
originalFile = new Path("fileToRename");
break;
case SourceInAFolder:
originalFile = new Path("file/to/rename");
break;
case SourceWithSpace:
originalFile = new Path("file to rename");
break;
case SourceWithPlusAndPercent:
originalFile = new Path("file+to%rename");
break;
default:
throw new Exception("Unknown variation");
}
Path destinationFile = new Path("file/resting/destination");
assertTrue(fs.createNewFile(originalFile));
assertTrue(fs.exists(originalFile));
assertFalse(fs.rename(originalFile, destinationFile)); // Parent directory
// doesn't exist
assertTrue(fs.mkdirs(destinationFile.getParent()));
boolean result = fs.rename(originalFile, destinationFile);
assertTrue(result);
assertTrue(fs.exists(destinationFile));
assertFalse(fs.exists(originalFile));
fs.delete(destinationFile.getParent(), true);
}
}
@Test
public void testRenameImplicitFolder() throws Exception {
Path testFile = new Path("deep/file/rename/test");
FsPermission permission = FsPermission.createImmutable((short) 644);
createEmptyFile(testFile, permission);
boolean renameResult = fs.rename(new Path("deep/file"), new Path("deep/renamed"));
assertTrue(renameResult);
assertFalse(fs.exists(testFile));
FileStatus newStatus = fs.getFileStatus(new Path("deep/renamed/rename/test"));
assertNotNull(newStatus);
assertEqualsIgnoreStickyBit(permission, newStatus.getPermission());
assertTrue(fs.delete(new Path("deep"), true));
}
private static enum RenameFolderVariation {
CreateFolderAndInnerFile, CreateJustInnerFile, CreateJustFolder
}
@Test
public void testRenameFolder() throws Exception {
for (RenameFolderVariation variation : RenameFolderVariation.values()) {
Path originalFolder = new Path("folderToRename");
if (variation != RenameFolderVariation.CreateJustInnerFile) {
assertTrue(fs.mkdirs(originalFolder));
}
Path innerFile = new Path(originalFolder, "innerFile");
Path innerFile2 = new Path(originalFolder, "innerFile2");
if (variation != RenameFolderVariation.CreateJustFolder) {
assertTrue(fs.createNewFile(innerFile));
assertTrue(fs.createNewFile(innerFile2));
}
Path destination = new Path("renamedFolder");
assertTrue(fs.rename(originalFolder, destination));
assertTrue(fs.exists(destination));
if (variation != RenameFolderVariation.CreateJustFolder) {
assertTrue(fs.exists(new Path(destination, innerFile.getName())));
assertTrue(fs.exists(new Path(destination, innerFile2.getName())));
}
assertFalse(fs.exists(originalFolder));
assertFalse(fs.exists(innerFile));
assertFalse(fs.exists(innerFile2));
fs.delete(destination, true);
}
}
@Test
public void testCopyFromLocalFileSystem() throws Exception {
Path localFilePath = new Path(System.getProperty("test.build.data",
"azure_test"));
FileSystem localFs = FileSystem.get(new Configuration());
localFs.delete(localFilePath, true);
try {
writeString(localFs, localFilePath, "Testing");
Path dstPath = new Path("copiedFromLocal");
assertTrue(FileUtil.copy(localFs, localFilePath, fs, dstPath, false,
fs.getConf()));
assertTrue(fs.exists(dstPath));
assertEquals("Testing", readString(fs, dstPath));
fs.delete(dstPath, true);
} finally {
localFs.delete(localFilePath, true);
}
}
@Test
public void testListDirectory() throws Exception {
Path rootFolder = new Path("testingList");
assertTrue(fs.mkdirs(rootFolder));
FileStatus[] listed = fs.listStatus(rootFolder);
assertEquals(0, listed.length);
Path innerFolder = new Path(rootFolder, "inner");
assertTrue(fs.mkdirs(innerFolder));
listed = fs.listStatus(rootFolder);
assertEquals(1, listed.length);
assertTrue(listed[0].isDirectory());
Path innerFile = new Path(innerFolder, "innerFile");
writeString(innerFile, "testing");
listed = fs.listStatus(rootFolder);
assertEquals(1, listed.length);
assertTrue(listed[0].isDirectory());
listed = fs.listStatus(innerFolder);
assertEquals(1, listed.length);
assertFalse(listed[0].isDirectory());
assertTrue(fs.delete(rootFolder, true));
}
@Test
public void testStatistics() throws Exception {
FileSystem.clearStatistics();
FileSystem.Statistics stats = FileSystem.getStatistics("wasb",
NativeAzureFileSystem.class);
assertEquals(0, stats.getBytesRead());
assertEquals(0, stats.getBytesWritten());
Path newFile = new Path("testStats");
writeString(newFile, "12345678");
assertEquals(8, stats.getBytesWritten());
assertEquals(0, stats.getBytesRead());
String readBack = readString(newFile);
assertEquals("12345678", readBack);
assertEquals(8, stats.getBytesRead());
assertEquals(8, stats.getBytesWritten());
assertTrue(fs.delete(newFile, true));
assertEquals(8, stats.getBytesRead());
assertEquals(8, stats.getBytesWritten());
}
@Test
public void testUriEncoding() throws Exception {
fs.create(new Path("p/t%5Fe")).close();
FileStatus[] listing = fs.listStatus(new Path("p"));
assertEquals(1, listing.length);
assertEquals("t%5Fe", listing[0].getPath().getName());
assertTrue(fs.rename(new Path("p"), new Path("q")));
assertTrue(fs.delete(new Path("q"), true));
}
@Test
public void testUriEncodingMoreComplexCharacters() throws Exception {
// Create a file name with URI reserved characters, plus the percent
String fileName = "!#$'()*;=[]%";
String directoryName = "*;=[]%!#$'()";
fs.create(new Path(directoryName, fileName)).close();
FileStatus[] listing = fs.listStatus(new Path(directoryName));
assertEquals(1, listing.length);
assertEquals(fileName, listing[0].getPath().getName());
FileStatus status = fs.getFileStatus(new Path(directoryName, fileName));
assertEquals(fileName, status.getPath().getName());
InputStream stream = fs.open(new Path(directoryName, fileName));
assertNotNull(stream);
stream.close();
assertTrue(fs.delete(new Path(directoryName, fileName), true));
assertTrue(fs.delete(new Path(directoryName), true));
}
@Test
public void testChineseCharacters() throws Exception {
    // Create a file and a folder with Chinese (non-ASCII) characters
String chinese = "" + '\u963f' + '\u4db5';
String fileName = "filename" + chinese;
String directoryName = chinese;
fs.create(new Path(directoryName, fileName)).close();
FileStatus[] listing = fs.listStatus(new Path(directoryName));
assertEquals(1, listing.length);
assertEquals(fileName, listing[0].getPath().getName());
FileStatus status = fs.getFileStatus(new Path(directoryName, fileName));
assertEquals(fileName, status.getPath().getName());
InputStream stream = fs.open(new Path(directoryName, fileName));
assertNotNull(stream);
stream.close();
assertTrue(fs.delete(new Path(directoryName, fileName), true));
assertTrue(fs.delete(new Path(directoryName), true));
}
@Test
public void testChineseCharactersFolderRename() throws Exception {
    // Create a file and a folder with Chinese (non-ASCII) characters
String chinese = "" + '\u963f' + '\u4db5';
String fileName = "filename" + chinese;
String srcDirectoryName = chinese;
String targetDirectoryName = "target" + chinese;
fs.create(new Path(srcDirectoryName, fileName)).close();
fs.rename(new Path(srcDirectoryName), new Path(targetDirectoryName));
FileStatus[] listing = fs.listStatus(new Path(targetDirectoryName));
assertEquals(1, listing.length);
assertEquals(fileName, listing[0].getPath().getName());
FileStatus status = fs.getFileStatus(new Path(targetDirectoryName, fileName));
assertEquals(fileName, status.getPath().getName());
assertTrue(fs.delete(new Path(targetDirectoryName, fileName), true));
assertTrue(fs.delete(new Path(targetDirectoryName), true));
}
@Test
public void testReadingDirectoryAsFile() throws Exception {
Path dir = new Path("/x");
assertTrue(fs.mkdirs(dir));
try {
fs.open(dir).close();
assertTrue("Should've thrown", false);
} catch (FileNotFoundException ex) {
assertEquals("/x is a directory not a file.", ex.getMessage());
}
}
@Test
public void testCreatingFileOverDirectory() throws Exception {
Path dir = new Path("/x");
assertTrue(fs.mkdirs(dir));
try {
fs.create(dir).close();
assertTrue("Should've thrown", false);
} catch (IOException ex) {
assertEquals("Cannot create file /x; already exists as a directory.",
ex.getMessage());
}
}
@Test
public void testInputStreamReadWithZeroSizeBuffer() throws Exception {
Path newFile = new Path("zeroSizeRead");
OutputStream output = fs.create(newFile);
output.write(10);
output.close();
InputStream input = fs.open(newFile);
int result = input.read(new byte[2], 0, 0);
assertEquals(0, result);
}
@Test
public void testInputStreamReadWithBufferReturnsMinusOneOnEof() throws Exception {
Path newFile = new Path("eofRead");
OutputStream output = fs.create(newFile);
output.write(10);
output.close();
// Read first byte back
InputStream input = fs.open(newFile);
byte[] buff = new byte[1];
int result = input.read(buff, 0, 1);
assertEquals(1, result);
assertEquals(10, buff[0]);
// Issue another read and make sure it returns -1
buff[0] = 2;
result = input.read(buff, 0, 1);
assertEquals(-1, result);
// Buffer is intact
assertEquals(2, buff[0]);
}
@Test
public void testInputStreamReadWithBufferReturnsMinusOneOnEofForLargeBuffer() throws Exception {
Path newFile = new Path("eofRead2");
OutputStream output = fs.create(newFile);
byte[] outputBuff = new byte[97331];
for(int i = 0; i < outputBuff.length; ++i) {
outputBuff[i] = (byte)(Math.random() * 255);
}
output.write(outputBuff);
output.close();
// Read the content of the file
InputStream input = fs.open(newFile);
byte[] buff = new byte[131072];
int result = input.read(buff, 0, buff.length);
assertEquals(outputBuff.length, result);
for(int i = 0; i < outputBuff.length; ++i) {
assertEquals(outputBuff[i], buff[i]);
}
// Issue another read and make sure it returns -1
buff = new byte[131072];
result = input.read(buff, 0, buff.length);
assertEquals(-1, result);
}
@Test
public void testInputStreamReadIntReturnsMinusOneOnEof() throws Exception {
Path newFile = new Path("eofRead3");
OutputStream output = fs.create(newFile);
output.write(10);
output.close();
// Read first byte back
InputStream input = fs.open(newFile);
int value = input.read();
assertEquals(10, value);
// Issue another read and make sure it returns -1
value = input.read();
assertEquals(-1, value);
}
@Test
public void testSetPermissionOnFile() throws Exception {
Path newFile = new Path("testPermission");
OutputStream output = fs.create(newFile);
output.write(13);
output.close();
FsPermission newPermission = new FsPermission((short) 0700);
fs.setPermission(newFile, newPermission);
FileStatus newStatus = fs.getFileStatus(newFile);
assertNotNull(newStatus);
assertEquals(newPermission, newStatus.getPermission());
assertEquals("supergroup", newStatus.getGroup());
assertEquals(UserGroupInformation.getCurrentUser().getShortUserName(),
newStatus.getOwner());
// Don't check the file length for page blobs. Only block blobs
// provide the actual length of bytes written.
if (!(this instanceof TestNativeAzureFSPageBlobLive)) {
assertEquals(1, newStatus.getLen());
}
}
@Test
public void testSetPermissionOnFolder() throws Exception {
Path newFolder = new Path("testPermission");
assertTrue(fs.mkdirs(newFolder));
FsPermission newPermission = new FsPermission((short) 0600);
fs.setPermission(newFolder, newPermission);
FileStatus newStatus = fs.getFileStatus(newFolder);
assertNotNull(newStatus);
assertEquals(newPermission, newStatus.getPermission());
assertTrue(newStatus.isDirectory());
}
@Test
public void testSetOwnerOnFile() throws Exception {
Path newFile = new Path("testOwner");
OutputStream output = fs.create(newFile);
output.write(13);
output.close();
fs.setOwner(newFile, "newUser", null);
FileStatus newStatus = fs.getFileStatus(newFile);
assertNotNull(newStatus);
assertEquals("newUser", newStatus.getOwner());
assertEquals("supergroup", newStatus.getGroup());
// File length is only reported to be the size of bytes written to the file for block blobs.
// So only check it for block blobs, not page blobs.
if (!(this instanceof TestNativeAzureFSPageBlobLive)) {
assertEquals(1, newStatus.getLen());
}
fs.setOwner(newFile, null, "newGroup");
newStatus = fs.getFileStatus(newFile);
assertNotNull(newStatus);
assertEquals("newUser", newStatus.getOwner());
assertEquals("newGroup", newStatus.getGroup());
}
@Test
public void testSetOwnerOnFolder() throws Exception {
Path newFolder = new Path("testOwner");
assertTrue(fs.mkdirs(newFolder));
fs.setOwner(newFolder, "newUser", null);
FileStatus newStatus = fs.getFileStatus(newFolder);
assertNotNull(newStatus);
assertEquals("newUser", newStatus.getOwner());
assertTrue(newStatus.isDirectory());
}
@Test
public void testModifiedTimeForFile() throws Exception {
Path testFile = new Path("testFile");
fs.create(testFile).close();
testModifiedTime(testFile);
}
@Test
public void testModifiedTimeForFolder() throws Exception {
Path testFolder = new Path("testFolder");
assertTrue(fs.mkdirs(testFolder));
testModifiedTime(testFolder);
}
@Test
public void testFolderLastModifiedTime() throws Exception {
Path parentFolder = new Path("testFolder");
Path innerFile = new Path(parentFolder, "innerfile");
assertTrue(fs.mkdirs(parentFolder));
// Create file
long lastModifiedTime = fs.getFileStatus(parentFolder)
.getModificationTime();
// Wait at least the error margin
Thread.sleep(modifiedTimeErrorMargin + 1);
assertTrue(fs.createNewFile(innerFile));
// The parent folder last modified time should have changed because we
// create an inner file.
assertFalse(testModifiedTime(parentFolder, lastModifiedTime));
testModifiedTime(parentFolder);
// Rename file
lastModifiedTime = fs.getFileStatus(parentFolder).getModificationTime();
Path destFolder = new Path("testDestFolder");
assertTrue(fs.mkdirs(destFolder));
long destLastModifiedTime = fs.getFileStatus(destFolder)
.getModificationTime();
Thread.sleep(modifiedTimeErrorMargin + 1);
Path destFile = new Path(destFolder, "innerfile");
assertTrue(fs.rename(innerFile, destFile));
// Both source and destination folder last modified time should have changed
// because of renaming.
assertFalse(testModifiedTime(parentFolder, lastModifiedTime));
assertFalse(testModifiedTime(destFolder, destLastModifiedTime));
testModifiedTime(parentFolder);
testModifiedTime(destFolder);
// Delete file
destLastModifiedTime = fs.getFileStatus(destFolder).getModificationTime();
// Wait at least the error margin
Thread.sleep(modifiedTimeErrorMargin + 1);
fs.delete(destFile, false);
// The parent folder last modified time should have changed because we
// delete an inner file.
assertFalse(testModifiedTime(destFolder, destLastModifiedTime));
testModifiedTime(destFolder);
}
/**
* Verify we can get file status of a directory with various forms of
* the directory file name, including the nonstandard but legal form
* ending in "/.". Check that we're getting status for a directory.
*/
@Test
public void testListSlash() throws Exception {
Path testFolder = new Path("/testFolder");
Path testFile = new Path(testFolder, "testFile");
assertTrue(fs.mkdirs(testFolder));
assertTrue(fs.createNewFile(testFile));
FileStatus status;
status = fs.getFileStatus(new Path("/testFolder"));
assertTrue(status.isDirectory());
status = fs.getFileStatus(new Path("/testFolder/"));
assertTrue(status.isDirectory());
status = fs.getFileStatus(new Path("/testFolder/."));
assertTrue(status.isDirectory());
}
@Test
public void testCannotCreatePageBlobByDefault() throws Exception {
// Verify that the page blob directory list configuration setting
// is not set in the default configuration.
Configuration conf = new Configuration();
String[] rawPageBlobDirs =
conf.getStrings(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES);
assertTrue(rawPageBlobDirs == null);
}
/*
* Set up a situation where a folder rename is partway finished.
* Then apply redo to finish the rename.
*
* The original source folder *would* have had contents
* folderToRename (0 byte dummy file for directory)
* folderToRename/innerFile
* folderToRename/innerFile2
*
* The actual source folder (after partial rename and failure)
*
* folderToRename
* folderToRename/innerFile2
*
* The actual target folder (after partial rename and failure)
*
* renamedFolder
* renamedFolder/innerFile
*/
@Test
public void testRedoRenameFolder() throws IOException {
// create original folder
String srcKey = "folderToRename";
Path originalFolder = new Path(srcKey);
assertTrue(fs.mkdirs(originalFolder));
Path innerFile = new Path(originalFolder, "innerFile");
assertTrue(fs.createNewFile(innerFile));
Path innerFile2 = new Path(originalFolder, "innerFile2");
assertTrue(fs.createNewFile(innerFile2));
String dstKey = "renamedFolder";
// propose (but don't do) the rename
Path home = fs.getHomeDirectory();
String relativeHomeDir = getRelativePath(home.toString());
NativeAzureFileSystem.FolderRenamePending pending =
new NativeAzureFileSystem.FolderRenamePending(
relativeHomeDir + "/" + srcKey,
relativeHomeDir + "/" + dstKey, null,
(NativeAzureFileSystem) fs);
// get the rename pending file contents
String renameDescription = pending.makeRenamePendingFileContents();
// Remove one file from source folder to simulate a partially done
// rename operation.
assertTrue(fs.delete(innerFile, false));
// Create the destination folder with just one file in it, again
// to simulate a partially done rename.
Path destination = new Path(dstKey);
Path innerDest = new Path(destination, "innerFile");
assertTrue(fs.createNewFile(innerDest));
// Create a rename-pending file and write rename information to it.
final String renamePendingStr = "folderToRename-RenamePending.json";
Path renamePendingFile = new Path(renamePendingStr);
FSDataOutputStream out = fs.create(renamePendingFile, true);
assertTrue(out != null);
writeString(out, renameDescription);
// Redo the rename operation based on the contents of the -RenamePending.json file.
// Trigger the redo by checking for existence of the original folder. It must appear
// to not exist.
assertFalse(fs.exists(originalFolder));
// Verify that the target is there, and the source is gone.
assertTrue(fs.exists(destination));
assertTrue(fs.exists(new Path(destination, innerFile.getName())));
assertTrue(fs.exists(new Path(destination, innerFile2.getName())));
assertFalse(fs.exists(originalFolder));
assertFalse(fs.exists(innerFile));
assertFalse(fs.exists(innerFile2));
// Verify that there's no RenamePending file left.
assertFalse(fs.exists(renamePendingFile));
// Verify that we can list the target directory.
FileStatus[] listed = fs.listStatus(destination);
assertEquals(2, listed.length);
    // List the home directory and show that its only entry is a directory.
Path root = fs.getHomeDirectory();
listed = fs.listStatus(root);
assertEquals(1, listed.length);
assertTrue(listed[0].isDirectory());
}
/**
* If there is a folder to be renamed inside a parent folder,
* then when you list the parent folder, you should only see
* the final result, after the rename.
*/
@Test
public void testRedoRenameFolderInFolderListing() throws IOException {
// create original folder
String parent = "parent";
Path parentFolder = new Path(parent);
assertTrue(fs.mkdirs(parentFolder));
Path inner = new Path(parentFolder, "innerFolder");
assertTrue(fs.mkdirs(inner));
Path inner2 = new Path(parentFolder, "innerFolder2");
assertTrue(fs.mkdirs(inner2));
Path innerFile = new Path(inner2, "file");
assertTrue(fs.createNewFile(innerFile));
Path inner2renamed = new Path(parentFolder, "innerFolder2Renamed");
// propose (but don't do) the rename of innerFolder2
Path home = fs.getHomeDirectory();
String relativeHomeDir = getRelativePath(home.toString());
NativeAzureFileSystem.FolderRenamePending pending =
new NativeAzureFileSystem.FolderRenamePending(
relativeHomeDir + "/" + inner2,
relativeHomeDir + "/" + inner2renamed, null,
(NativeAzureFileSystem) fs);
// Create a rename-pending file and write rename information to it.
final String renamePendingStr = inner2 + FolderRenamePending.SUFFIX;
Path renamePendingFile = new Path(renamePendingStr);
FSDataOutputStream out = fs.create(renamePendingFile, true);
assertTrue(out != null);
writeString(out, pending.makeRenamePendingFileContents());
// Redo the rename operation based on the contents of the
// -RenamePending.json file. Trigger the redo by checking for existence of
// the original folder. It must appear to not exist.
FileStatus[] listed = fs.listStatus(parentFolder);
assertEquals(2, listed.length);
assertTrue(listed[0].isDirectory());
assertTrue(listed[1].isDirectory());
// The rename pending file is not a directory, so at this point we know the
// redo has been done.
assertFalse(fs.exists(inner2)); // verify original folder is gone
assertTrue(fs.exists(inner2renamed)); // verify the target is there
assertTrue(fs.exists(new Path(inner2renamed, "file")));
}
/**
* Test the situation where a rename pending file exists but the rename
* is really done. This could happen if the rename process died just
* before deleting the rename pending file. It exercises a non-standard
* code path in redo().
*/
@Test
public void testRenameRedoFolderAlreadyDone() throws IOException {
// create only destination folder
String orig = "originalFolder";
String dest = "renamedFolder";
Path destPath = new Path(dest);
assertTrue(fs.mkdirs(destPath));
// propose (but don't do) the rename of innerFolder2
Path home = fs.getHomeDirectory();
String relativeHomeDir = getRelativePath(home.toString());
NativeAzureFileSystem.FolderRenamePending pending =
new NativeAzureFileSystem.FolderRenamePending(
relativeHomeDir + "/" + orig,
relativeHomeDir + "/" + dest, null,
(NativeAzureFileSystem) fs);
// Create a rename-pending file and write rename information to it.
final String renamePendingStr = orig + FolderRenamePending.SUFFIX;
Path renamePendingFile = new Path(renamePendingStr);
FSDataOutputStream out = fs.create(renamePendingFile, true);
assertTrue(out != null);
writeString(out, pending.makeRenamePendingFileContents());
try {
pending.redo();
} catch (Exception e) {
fail();
}
// Make sure rename pending file is gone.
FileStatus[] listed = fs.listStatus(new Path("/"));
assertEquals(1, listed.length);
assertTrue(listed[0].isDirectory());
}
@Test
public void testRedoFolderRenameAll() throws IllegalArgumentException, IOException {
{
FileFolder original = new FileFolder("folderToRename");
original.add("innerFile").add("innerFile2");
FileFolder partialSrc = original.copy();
FileFolder partialDst = original.copy();
partialDst.setName("renamedFolder");
partialSrc.setPresent(0, false);
partialDst.setPresent(1, false);
testRenameRedoFolderSituation(original, partialSrc, partialDst);
}
{
FileFolder original = new FileFolder("folderToRename");
original.add("file1").add("file2").add("file3");
FileFolder partialSrc = original.copy();
FileFolder partialDst = original.copy();
partialDst.setName("renamedFolder");
// Set up this state before the redo:
// folderToRename: file1 file3
// renamedFolder: file1 file2
// This gives code coverage for all 3 expected cases for individual file
// redo.
partialSrc.setPresent(1, false);
partialDst.setPresent(2, false);
testRenameRedoFolderSituation(original, partialSrc, partialDst);
}
{
// Simulate a situation with folder with a large number of files in it.
// For the first half of the files, they will be in the destination
// but not the source. For the second half, they will be in the source
// but not the destination. There will be one file in the middle that is
// in both source and destination. Then trigger redo and verify.
      // For testing larger folder sizes, temporarily edit the SIZE value below.
final int SIZE = 5;
assertTrue(SIZE >= 3);
// Try a lot of files in the folder.
FileFolder original = new FileFolder("folderToRename");
for (int i = 0; i < SIZE; i++) {
original.add("file" + Integer.toString(i));
}
FileFolder partialSrc = original.copy();
FileFolder partialDst = original.copy();
partialDst.setName("renamedFolder");
for (int i = 0; i < SIZE; i++) {
partialSrc.setPresent(i, i >= SIZE / 2);
partialDst.setPresent(i, i <= SIZE / 2);
}
testRenameRedoFolderSituation(original, partialSrc, partialDst);
}
{
// Do a nested folder, like so:
// folderToRename:
// nestedFolder: a, b, c
// p
// q
//
// Then delete file 'a' from the source and add it to destination.
// Then trigger redo.
FileFolder original = new FileFolder("folderToRename");
FileFolder nested = new FileFolder("nestedFolder");
nested.add("a").add("b").add("c");
original.add(nested).add("p").add("q");
FileFolder partialSrc = original.copy();
FileFolder partialDst = original.copy();
partialDst.setName("renamedFolder");
// logically remove 'a' from source
partialSrc.getMember(0).setPresent(0, false);
// logically eliminate b, c from destination
partialDst.getMember(0).setPresent(1, false);
partialDst.getMember(0).setPresent(2, false);
testRenameRedoFolderSituation(original, partialSrc, partialDst);
}
}
private void testRenameRedoFolderSituation(
FileFolder fullSrc,
FileFolder partialSrc,
FileFolder partialDst) throws IllegalArgumentException, IOException {
// make file folder tree for source
fullSrc.create();
// set up rename pending file
fullSrc.makeRenamePending(partialDst);
// prune away some files (as marked) from source to simulate partial rename
partialSrc.prune();
// Create only the files indicated for the destination to indicate a partial rename.
partialDst.create();
// trigger redo
assertFalse(fullSrc.exists());
// verify correct results
partialDst.verifyExists();
fullSrc.verifyGone();
// delete the new folder to leave no garbage behind
fs.delete(new Path(partialDst.getName()), true);
}
// Mock up of a generalized folder (which can also be a leaf-level file)
// for rename redo testing.
private class FileFolder {
private String name;
// For rename testing, indicates whether an expected
// file is present in the source or target folder.
private boolean present;
ArrayList<FileFolder> members; // Null if a leaf file, otherwise not null.
// Make a new, empty folder (not a regular leaf file).
public FileFolder(String name) {
this.name = name;
this.present = true;
members = new ArrayList<FileFolder>();
}
public FileFolder getMember(int i) {
return members.get(i);
}
// Verify a folder and all its contents are gone. This is only to
// be called on the root of a FileFolder.
public void verifyGone() throws IllegalArgumentException, IOException {
assertFalse(fs.exists(new Path(name)));
assertTrue(isFolder());
verifyGone(new Path(name), members);
}
private void verifyGone(Path prefix, ArrayList<FileFolder> members2) throws IOException {
for (FileFolder f : members2) {
f.verifyGone(prefix);
}
}
private void verifyGone(Path prefix) throws IOException {
assertFalse(fs.exists(new Path(prefix, name)));
if (isLeaf()) {
return;
}
for (FileFolder f : members) {
f.verifyGone(new Path(prefix, name));
}
}
public void verifyExists() throws IllegalArgumentException, IOException {
// verify the root is present
assertTrue(fs.exists(new Path(name)));
assertTrue(isFolder());
// check the members
verifyExists(new Path(name), members);
}
private void verifyExists(Path prefix, ArrayList<FileFolder> members2) throws IOException {
for (FileFolder f : members2) {
f.verifyExists(prefix);
}
}
private void verifyExists(Path prefix) throws IOException {
// verify this file/folder is present
assertTrue(fs.exists(new Path(prefix, name)));
// verify members are present
if (isLeaf()) {
return;
}
for (FileFolder f : members) {
f.verifyExists(new Path(prefix, name));
}
}
public boolean exists() throws IOException {
return fs.exists(new Path(name));
}
// Make a rename pending file for the situation where we rename
// this object (the source) to the specified destination.
public void makeRenamePending(FileFolder dst) throws IOException {
// Propose (but don't do) the rename.
Path home = fs.getHomeDirectory();
String relativeHomeDir = getRelativePath(home.toString());
NativeAzureFileSystem.FolderRenamePending pending =
new NativeAzureFileSystem.FolderRenamePending(
relativeHomeDir + "/" + this.getName(),
relativeHomeDir + "/" + dst.getName(), null,
(NativeAzureFileSystem) fs);
// Get the rename pending file contents.
String renameDescription = pending.makeRenamePendingFileContents();
// Create a rename-pending file and write rename information to it.
final String renamePendingStr = this.getName() + "-RenamePending.json";
Path renamePendingFile = new Path(renamePendingStr);
FSDataOutputStream out = fs.create(renamePendingFile, true);
assertTrue(out != null);
writeString(out, renameDescription);
}
// set whether a child is present or not
public void setPresent(int i, boolean b) {
members.get(i).setPresent(b);
}
    // Make an uninitialized FileFolder; members stays null, so it represents a leaf file.
private FileFolder() {
this.present = true;
}
public void setPresent(boolean value) {
present = value;
}
public FileFolder makeLeaf(String name) {
FileFolder f = new FileFolder();
f.setName(name);
return f;
}
void setName(String name) {
this.name = name;
}
public String getName() {
return name;
}
public boolean isLeaf() {
return members == null;
}
public boolean isFolder() {
return members != null;
}
FileFolder add(FileFolder folder) {
members.add(folder);
return this;
}
// Add a leaf file (by convention, if you pass a string argument, you get a leaf).
FileFolder add(String file) {
FileFolder leaf = makeLeaf(file);
members.add(leaf);
return this;
}
public FileFolder copy() {
if (isLeaf()) {
return makeLeaf(name);
} else {
FileFolder f = new FileFolder(name);
for (FileFolder member : members) {
f.add(member.copy());
}
return f;
}
}
    // Create the folder structure, asserting that each file/folder creation succeeds.
public void create() throws IllegalArgumentException, IOException {
create(null);
}
private void create(Path prefix) throws IllegalArgumentException, IOException {
if (isFolder()) {
if (present) {
assertTrue(fs.mkdirs(makePath(prefix, name)));
}
create(makePath(prefix, name), members);
} else if (isLeaf()) {
if (present) {
assertTrue(fs.createNewFile(makePath(prefix, name)));
}
} else {
assertTrue("The object must be a (leaf) file or a folder.", false);
}
}
private void create(Path prefix, ArrayList<FileFolder> members2) throws IllegalArgumentException, IOException {
for (FileFolder f : members2) {
f.create(prefix);
}
}
private Path makePath(Path prefix, String name) {
if (prefix == null) {
return new Path(name);
} else {
return new Path(prefix, name);
}
}
// Remove the files marked as not present.
public void prune() throws IOException {
prune(null);
}
private void prune(Path prefix) throws IOException {
Path path = null;
if (prefix == null) {
path = new Path(name);
} else {
path = new Path(prefix, name);
}
if (isLeaf() && !present) {
assertTrue(fs.delete(path, false));
} else if (isFolder() && !present) {
assertTrue(fs.delete(path, true));
} else if (isFolder()) {
for (FileFolder f : members) {
f.prune(path);
}
}
}
}
private String getRelativePath(String path) {
    // example input: wasb://<container>@<account>.blob.core.windows.net/user/ehans/folderToRename
// example result: user/ehans/folderToRename
// Find the third / position and return input substring after that.
int slashCount = 0; // number of slashes so far
int i;
for (i = 0; i < path.length(); i++) {
if (path.charAt(i) == '/') {
slashCount++;
if (slashCount == 3) {
return path.substring(i + 1, path.length());
}
}
}
throw new RuntimeException("Incorrect path prefix -- expected wasb://.../...");
}
@Test
public void testCloseFileSystemTwice() throws Exception {
//make sure close() can be called multiple times without doing any harm
fs.close();
fs.close();
}
// Test the available() method for the input stream returned by fs.open().
// This works for both page and block blobs.
int FILE_SIZE = 4 * 1024 * 1024 + 1; // Make this 1 bigger than internal
// buffer used in BlobInputStream
// to exercise that case.
int MAX_STRIDE = FILE_SIZE + 1;
Path PATH = new Path("/available.dat");
@Test
public void testAvailable() throws IOException {
// write FILE_SIZE bytes to page blob
FSDataOutputStream out = fs.create(PATH);
byte[] data = new byte[FILE_SIZE];
Arrays.fill(data, (byte) 5);
out.write(data, 0, FILE_SIZE);
out.close();
// Test available() for different read sizes
verifyAvailable(1);
verifyAvailable(100);
verifyAvailable(5000);
verifyAvailable(FILE_SIZE);
verifyAvailable(MAX_STRIDE);
fs.delete(PATH, false);
}
// Verify that available() for the input stream is always >= 1 unless we've
// consumed all the input, and then it is 0. This is to match expectations by
// HBase which were set based on behavior of DFSInputStream.available().
private void verifyAvailable(int readStride) throws IOException {
FSDataInputStream in = fs.open(PATH);
try {
byte[] inputBuffer = new byte[MAX_STRIDE];
int position = 0;
int bytesRead = 0;
while(bytesRead != FILE_SIZE) {
bytesRead += in.read(inputBuffer, position, readStride);
int available = in.available();
if (bytesRead < FILE_SIZE) {
if (available < 1) {
fail(String.format(
"expected available > 0 but got: "
+ "position = %d, bytesRead = %d, in.available() = %d",
position, bytesRead, available));
}
}
}
int available = in.available();
assertTrue(available == 0);
} finally {
in.close();
}
}
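  // Contract sketch (hypothetical helper, mirroring the expectation verified
  // above): callers such as HBase treat available() > 0 as "more to read" and
  // 0 as end of stream.
  private static boolean hasMoreToRead(FSDataInputStream in) throws IOException {
    return in.available() > 0;
  }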
@Test
public void testGetFileSizeFromListing() throws IOException {
Path path = new Path("file.dat");
final int PAGE_SIZE = 512;
final int FILE_SIZE = PAGE_SIZE + 1;
// write FILE_SIZE bytes to page blob
FSDataOutputStream out = fs.create(path);
byte[] data = new byte[FILE_SIZE];
Arrays.fill(data, (byte) 5);
out.write(data, 0, FILE_SIZE);
out.close();
// list the file to get its properties
FileStatus[] status = fs.listStatus(path);
assertEquals(1, status.length);
// The file length should report the number of bytes
// written for either page or block blobs (subclasses
// of this test class will exercise both).
assertEquals(FILE_SIZE, status[0].getLen());
}
private boolean testModifiedTime(Path testPath, long time) throws Exception {
FileStatus fileStatus = fs.getFileStatus(testPath);
final long errorMargin = modifiedTimeErrorMargin;
long lastModified = fileStatus.getModificationTime();
return (lastModified > (time - errorMargin) && lastModified < (time + errorMargin));
}
@SuppressWarnings("deprecation")
@Test
public void testCreateNonRecursive() throws Exception {
Path testFolder = new Path("/testFolder");
Path testFile = new Path(testFolder, "testFile");
try {
fs.createNonRecursive(testFile, true, 1024, (short)1, 1024, null);
assertTrue("Should've thrown", false);
} catch (FileNotFoundException e) {
}
fs.mkdirs(testFolder);
fs.createNonRecursive(testFile, true, 1024, (short)1, 1024, null)
.close();
assertTrue(fs.exists(testFile));
}
  @Test
  public void testFileEndingInDot() throws Exception {
Path testFolder = new Path("/testFolder.");
Path testFile = new Path(testFolder, "testFile.");
assertTrue(fs.mkdirs(testFolder));
assertTrue(fs.createNewFile(testFile));
assertTrue(fs.exists(testFile));
FileStatus[] listed = fs.listStatus(testFolder);
assertEquals(1, listed.length);
assertEquals("testFile.", listed[0].getPath().getName());
}
private void testModifiedTime(Path testPath) throws Exception {
Calendar utc = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
long currentUtcTime = utc.getTime().getTime();
FileStatus fileStatus = fs.getFileStatus(testPath);
final long errorMargin = 10 * 1000; // Give it +/-10 seconds
assertTrue("Modification time " +
new Date(fileStatus.getModificationTime()) + " is not close to now: " +
utc.getTime(),
fileStatus.getModificationTime() > (currentUtcTime - errorMargin) &&
fileStatus.getModificationTime() < (currentUtcTime + errorMargin));
}
private void createEmptyFile(Path testFile, FsPermission permission)
throws IOException {
FSDataOutputStream outputStream = fs.create(testFile, permission, true,
4096, (short) 1, 1024, null);
outputStream.close();
}
private String readString(Path testFile) throws IOException {
return readString(fs, testFile);
}
private String readString(FileSystem fs, Path testFile) throws IOException {
FSDataInputStream inputStream = fs.open(testFile);
String ret = readString(inputStream);
inputStream.close();
return ret;
}
private String readString(FSDataInputStream inputStream) throws IOException {
BufferedReader reader = new BufferedReader(new InputStreamReader(
inputStream));
final int BUFFER_SIZE = 1024;
char[] buffer = new char[BUFFER_SIZE];
int count = reader.read(buffer, 0, BUFFER_SIZE);
if (count > BUFFER_SIZE) {
throw new IOException("Exceeded buffer size");
}
inputStream.close();
return new String(buffer, 0, count);
}
private void writeString(Path path, String value) throws IOException {
writeString(fs, path, value);
}
private void writeString(FileSystem fs, Path path, String value)
throws IOException {
FSDataOutputStream outputStream = fs.create(path, true);
writeString(outputStream, value);
}
private void writeString(FSDataOutputStream outputStream, String value)
throws IOException {
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(
outputStream));
writer.write(value);
writer.close();
}
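  // Usage sketch (assumed, not from the original tests): the string helpers
  // above are typically paired for a simple round trip through the file
  // system, e.g.:
  //   writeString(new Path("/roundtrip.txt"), "hello");
  //   assertEquals("hello", readString(new Path("/roundtrip.txt")));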
@Test
// Acquire and free a Lease object. Wait for more than the lease
// timeout, to make sure the lease renews itself.
public void testSelfRenewingLease() throws IllegalArgumentException, IOException,
InterruptedException, StorageException {
SelfRenewingLease lease;
final String FILE_KEY = "file";
fs.create(new Path(FILE_KEY));
NativeAzureFileSystem nfs = (NativeAzureFileSystem) fs;
String fullKey = nfs.pathToKey(nfs.makeAbsolute(new Path(FILE_KEY)));
AzureNativeFileSystemStore store = nfs.getStore();
lease = store.acquireLease(fullKey);
assertTrue(lease.getLeaseID() != null);
// The sleep time for the keep-alive thread is 40 seconds, so sleep just
// a little beyond that, to make sure the keep-alive thread wakes up
// and renews the lease.
Thread.sleep(42000);
lease.free();
// Check that the lease is really freed.
CloudBlob blob = lease.getCloudBlob();
// Try to acquire it again, using direct Azure blob access.
// If that succeeds, then the lease was already freed.
String differentLeaseID = null;
try {
differentLeaseID = blob.acquireLease(15, null);
} catch (Exception e) {
e.printStackTrace();
fail("Caught exception trying to directly re-acquire lease from Azure");
} finally {
assertTrue(differentLeaseID != null);
AccessCondition accessCondition = AccessCondition.generateEmptyCondition();
accessCondition.setLeaseID(differentLeaseID);
blob.releaseLease(accessCondition);
}
}
@Test
// Acquire a SelfRenewingLease object. Wait for more than the lease
// timeout, to make sure the lease renews itself. Delete the file.
// That will automatically free the lease.
// (that should work without any failures).
public void testSelfRenewingLeaseFileDelete()
throws IllegalArgumentException, IOException,
InterruptedException, StorageException {
SelfRenewingLease lease;
final String FILE_KEY = "file";
final Path path = new Path(FILE_KEY);
fs.create(path);
NativeAzureFileSystem nfs = (NativeAzureFileSystem) fs;
String fullKey = nfs.pathToKey(nfs.makeAbsolute(path));
lease = nfs.getStore().acquireLease(fullKey);
assertTrue(lease.getLeaseID() != null);
// The sleep time for the keep-alive thread is 40 seconds, so sleep just
// a little beyond that, to make sure the keep-alive thread wakes up
// and renews the lease.
Thread.sleep(42000);
nfs.getStore().delete(fullKey, lease);
// Check that the file is really gone and the lease is freed.
assertTrue(!fs.exists(path));
assertTrue(lease.isFreed());
}
// Variables to check assertions in next test.
private long firstEndTime;
private long secondStartTime;
// Create two threads. One will get a lease on a file.
// The second one will try to get the lease and thus block.
// Then the first one will free the lease and the second
// one will get it and proceed.
@Test
public void testLeaseAsDistributedLock() throws IllegalArgumentException,
IOException {
final String LEASE_LOCK_FILE_KEY = "file";
fs.create(new Path(LEASE_LOCK_FILE_KEY));
NativeAzureFileSystem nfs = (NativeAzureFileSystem) fs;
String fullKey = nfs.pathToKey(nfs.makeAbsolute(new Path(LEASE_LOCK_FILE_KEY)));
Thread first = new Thread(new LeaseLockAction("first-thread", fullKey));
first.start();
Thread second = new Thread(new LeaseLockAction("second-thread", fullKey));
second.start();
try {
// Wait for the two threads to finish.
first.join();
second.join();
assertTrue(firstEndTime < secondStartTime);
    } catch (InterruptedException e) {
      // Restore the interrupt flag before failing; fail() throws, so any
      // statement placed after it would never run.
      Thread.currentThread().interrupt();
      fail("Unable to wait for threads to finish");
    }
}
private class LeaseLockAction implements Runnable {
private String name;
private String key;
LeaseLockAction(String name, String key) {
this.name = name;
this.key = key;
}
@Override
public void run() {
LOG.info("starting thread " + name);
SelfRenewingLease lease = null;
NativeAzureFileSystem nfs = (NativeAzureFileSystem) fs;
if (name.equals("first-thread")) {
try {
lease = nfs.getStore().acquireLease(key);
LOG.info(name + " acquired lease " + lease.getLeaseID());
} catch (AzureException e) {
assertTrue("Unanticipated exception", false);
}
assertTrue(lease != null);
try {
// Sleep long enough for the lease to renew once.
Thread.sleep(SelfRenewingLease.LEASE_RENEWAL_PERIOD + 2000);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
try {
firstEndTime = System.currentTimeMillis();
lease.free();
LOG.info(name + " freed lease " + lease.getLeaseID());
} catch (StorageException e) {
fail("Unanticipated exception");
}
} else if (name.equals("second-thread")) {
try {
// sleep 2 sec to let first thread get ahead of this one
Thread.sleep(2000);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
try {
LOG.info(name + " before getting lease");
lease = nfs.getStore().acquireLease(key);
secondStartTime = System.currentTimeMillis();
LOG.info(name + " acquired lease " + lease.getLeaseID());
} catch (AzureException e) {
assertTrue("Unanticipated exception", false);
}
assertTrue(lease != null);
try {
lease.free();
LOG.info(name + " freed lease " + lease.getLeaseID());
} catch (StorageException e) {
assertTrue("Unanticipated exception", false);
}
} else {
assertTrue("Unknown thread name", false);
}
LOG.info(name + " is exiting.");
}
}
}
| 57,482 | 35.084746 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobTypeSpeedDifference.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import java.io.*;
import java.util.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
import junit.framework.*;
import org.junit.Test;
/**
* A simple benchmark to find out the difference in speed between block
* and page blobs.
*/
public class TestBlobTypeSpeedDifference extends TestCase {
/**
* Writes data to the given stream of the given size, flushing every
* x bytes.
*/
private static void writeTestFile(OutputStream writeStream,
long size, long flushInterval) throws IOException {
int bufferSize = (int) Math.min(1000, flushInterval);
byte[] buffer = new byte[bufferSize];
Arrays.fill(buffer, (byte) 7);
int bytesWritten = 0;
int bytesUnflushed = 0;
while (bytesWritten < size) {
int numberToWrite = (int) Math.min(bufferSize, size - bytesWritten);
writeStream.write(buffer, 0, numberToWrite);
bytesWritten += numberToWrite;
bytesUnflushed += numberToWrite;
if (bytesUnflushed >= flushInterval) {
writeStream.flush();
bytesUnflushed = 0;
}
}
}
private static class TestResult {
final long timeTakenInMs;
final long totalNumberOfRequests;
TestResult(long timeTakenInMs, long totalNumberOfRequests) {
this.timeTakenInMs = timeTakenInMs;
this.totalNumberOfRequests = totalNumberOfRequests;
}
}
/**
* Writes data to the given file of the given size, flushing every
* x bytes. Measure performance of that and return it.
*/
private static TestResult writeTestFile(NativeAzureFileSystem fs, Path path,
long size, long flushInterval) throws IOException {
AzureFileSystemInstrumentation instrumentation =
fs.getInstrumentation();
long initialRequests = instrumentation.getCurrentWebResponses();
Date start = new Date();
OutputStream output = fs.create(path);
writeTestFile(output, size, flushInterval);
output.close();
long finalRequests = instrumentation.getCurrentWebResponses();
return new TestResult(new Date().getTime() - start.getTime(),
finalRequests - initialRequests);
}
/**
* Writes data to a block blob of the given size, flushing every
* x bytes. Measure performance of that and return it.
*/
private static TestResult writeBlockBlobTestFile(NativeAzureFileSystem fs,
long size, long flushInterval) throws IOException {
return writeTestFile(fs, new Path("/blockBlob"), size, flushInterval);
}
/**
* Writes data to a page blob of the given size, flushing every
* x bytes. Measure performance of that and return it.
*/
private static TestResult writePageBlobTestFile(NativeAzureFileSystem fs,
long size, long flushInterval) throws IOException {
return writeTestFile(fs,
AzureBlobStorageTestAccount.pageBlobPath("pageBlob"),
size, flushInterval);
}
/**
* Runs the benchmark over a small 10 KB file, flushing every 500 bytes.
*/
@Test
public void testTenKbFileFrequentFlush() throws Exception {
AzureBlobStorageTestAccount testAccount =
AzureBlobStorageTestAccount.create();
if (testAccount == null) {
return;
}
try {
testForSizeAndFlushInterval(testAccount.getFileSystem(), 10 * 1000, 500);
} finally {
testAccount.cleanup();
}
}
/**
* Runs the benchmark for the given file size and flush frequency.
*/
private static void testForSizeAndFlushInterval(NativeAzureFileSystem fs,
final long size, final long flushInterval) throws IOException {
for (int i = 0; i < 5; i++) {
TestResult pageBlobResults = writePageBlobTestFile(fs, size, flushInterval);
System.out.printf(
"Page blob upload took %d ms. Total number of requests: %d.\n",
pageBlobResults.timeTakenInMs, pageBlobResults.totalNumberOfRequests);
TestResult blockBlobResults = writeBlockBlobTestFile(fs, size, flushInterval);
System.out.printf(
"Block blob upload took %d ms. Total number of requests: %d.\n",
blockBlobResults.timeTakenInMs, blockBlobResults.totalNumberOfRequests);
}
}
/**
* Runs the benchmark for the given file size and flush frequency from the
* command line.
*/
  public static void main(String[] argv) throws Exception {
Configuration conf = new Configuration();
long size = 10 * 1000 * 1000;
long flushInterval = 2000;
if (argv.length > 0) {
size = Long.parseLong(argv[0]);
}
if (argv.length > 1) {
flushInterval = Long.parseLong(argv[1]);
}
testForSizeAndFlushInterval((NativeAzureFileSystem)FileSystem.get(conf),
size, flushInterval);
}
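  // Usage sketch (editorial, mirrors main() above): the benchmark can also be
  // driven programmatically from another test or a small driver, e.g.:
  //   NativeAzureFileSystem fs =
  //       (NativeAzureFileSystem) FileSystem.get(new Configuration());
  //   testForSizeAndFlushInterval(fs, 10 * 1000 * 1000, 2000);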
}
| 5,578 | 33.652174 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeNotNull;
import java.io.FileNotFoundException;
import java.util.EnumSet;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.CreateOptions;
import org.junit.After;
import org.junit.Test;
import com.microsoft.azure.storage.blob.BlobOutputStream;
import com.microsoft.azure.storage.blob.CloudBlobContainer;
import com.microsoft.azure.storage.blob.CloudBlockBlob;
/**
* Tests that WASB creates containers only if needed.
*/
public class TestContainerChecks {
private AzureBlobStorageTestAccount testAccount;
@After
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
}
}
@Test
public void testContainerExistAfterDoesNotExist() throws Exception {
testAccount = AzureBlobStorageTestAccount.create("",
EnumSet.noneOf(CreateOptions.class));
assumeNotNull(testAccount);
CloudBlobContainer container = testAccount.getRealContainer();
FileSystem fs = testAccount.getFileSystem();
// Starting off with the container not there
assertFalse(container.exists());
// A list shouldn't create the container and will set file system store
// state to DoesNotExist
try {
fs.listStatus(new Path("/"));
assertTrue("Should've thrown.", false);
} catch (FileNotFoundException ex) {
assertTrue("Unexpected exception: " + ex,
ex.getMessage().contains("does not exist."));
}
assertFalse(container.exists());
// Create a container outside of the WASB FileSystem
container.create();
// Add a file to the container outside of the WASB FileSystem
CloudBlockBlob blob = testAccount.getBlobReference("foo");
BlobOutputStream outputStream = blob.openOutputStream();
outputStream.write(new byte[10]);
outputStream.close();
// Make sure the file is visible
assertTrue(fs.exists(new Path("/foo")));
assertTrue(container.exists());
}
@Test
public void testContainerCreateAfterDoesNotExist() throws Exception {
testAccount = AzureBlobStorageTestAccount.create("",
EnumSet.noneOf(CreateOptions.class));
assumeNotNull(testAccount);
CloudBlobContainer container = testAccount.getRealContainer();
FileSystem fs = testAccount.getFileSystem();
// Starting off with the container not there
assertFalse(container.exists());
// A list shouldn't create the container and will set file system store
// state to DoesNotExist
try {
assertNull(fs.listStatus(new Path("/")));
assertTrue("Should've thrown.", false);
} catch (FileNotFoundException ex) {
assertTrue("Unexpected exception: " + ex,
ex.getMessage().contains("does not exist."));
}
assertFalse(container.exists());
// Create a container outside of the WASB FileSystem
container.create();
// Write should succeed
assertTrue(fs.createNewFile(new Path("/foo")));
assertTrue(container.exists());
}
@Test
public void testContainerCreateOnWrite() throws Exception {
testAccount = AzureBlobStorageTestAccount.create("",
EnumSet.noneOf(CreateOptions.class));
assumeNotNull(testAccount);
CloudBlobContainer container = testAccount.getRealContainer();
FileSystem fs = testAccount.getFileSystem();
// Starting off with the container not there
assertFalse(container.exists());
// A list shouldn't create the container.
try {
fs.listStatus(new Path("/"));
assertTrue("Should've thrown.", false);
} catch (FileNotFoundException ex) {
assertTrue("Unexpected exception: " + ex,
ex.getMessage().contains("does not exist."));
}
assertFalse(container.exists());
// Neither should a read.
try {
fs.open(new Path("/foo"));
assertFalse("Should've thrown.", true);
} catch (FileNotFoundException ex) {
}
assertFalse(container.exists());
// Neither should a rename
assertFalse(fs.rename(new Path("/foo"), new Path("/bar")));
assertFalse(container.exists());
// But a write should.
assertTrue(fs.createNewFile(new Path("/foo")));
assertTrue(container.exists());
}
@Test
public void testContainerChecksWithSas() throws Exception {
testAccount = AzureBlobStorageTestAccount.create("",
EnumSet.of(CreateOptions.UseSas));
assumeNotNull(testAccount);
CloudBlobContainer container = testAccount.getRealContainer();
FileSystem fs = testAccount.getFileSystem();
// The container shouldn't be there
assertFalse(container.exists());
// A write should just fail
try {
fs.createNewFile(new Path("/foo"));
assertFalse("Should've thrown.", true);
} catch (AzureException ex) {
}
assertFalse(container.exists());
}
}
| 5,846 | 32.221591 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.util.HashMap;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Tests that WASB handles things gracefully when users add blobs to the Azure
* Storage container from outside WASB's control.
*/
public class TestOutOfBandAzureBlobOperations {
private AzureBlobStorageTestAccount testAccount;
private FileSystem fs;
private InMemoryBlockBlobStore backingStore;
@Before
public void setUp() throws Exception {
testAccount = AzureBlobStorageTestAccount.createMock();
fs = testAccount.getFileSystem();
backingStore = testAccount.getMockStorage().getBackingStore();
}
@After
public void tearDown() throws Exception {
testAccount.cleanup();
fs = null;
backingStore = null;
}
private void createEmptyBlobOutOfBand(String path) {
backingStore.setContent(
AzureBlobStorageTestAccount.toMockUri(path),
new byte[] { 1, 2 },
new HashMap<String, String>(),
false, 0);
}
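  // Editorial note: writing directly to the mock backing store, as above,
  // simulates another client adding a blob to the container outside of WASB's
  // control; this is the "out of band" scenario the tests below exercise.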
@SuppressWarnings("deprecation")
@Test
public void testImplicitFolderListed() throws Exception {
createEmptyBlobOutOfBand("root/b");
// List the blob itself.
FileStatus[] obtained = fs.listStatus(new Path("/root/b"));
assertNotNull(obtained);
assertEquals(1, obtained.length);
assertFalse(obtained[0].isDir());
assertEquals("/root/b", obtained[0].getPath().toUri().getPath());
// List the directory
obtained = fs.listStatus(new Path("/root"));
assertNotNull(obtained);
assertEquals(1, obtained.length);
assertFalse(obtained[0].isDir());
assertEquals("/root/b", obtained[0].getPath().toUri().getPath());
// Get the directory's file status
FileStatus dirStatus = fs.getFileStatus(new Path("/root"));
assertNotNull(dirStatus);
assertTrue(dirStatus.isDir());
assertEquals("/root", dirStatus.getPath().toUri().getPath());
}
@Test
public void testImplicitFolderDeleted() throws Exception {
createEmptyBlobOutOfBand("root/b");
assertTrue(fs.exists(new Path("/root")));
assertTrue(fs.delete(new Path("/root"), true));
assertFalse(fs.exists(new Path("/root")));
}
@Test
public void testFileInImplicitFolderDeleted() throws Exception {
createEmptyBlobOutOfBand("root/b");
assertTrue(fs.exists(new Path("/root")));
assertTrue(fs.delete(new Path("/root/b"), true));
assertTrue(fs.exists(new Path("/root")));
}
@SuppressWarnings("deprecation")
@Test
public void testFileAndImplicitFolderSameName() throws Exception {
createEmptyBlobOutOfBand("root/b");
createEmptyBlobOutOfBand("root/b/c");
FileStatus[] listResult = fs.listStatus(new Path("/root/b"));
// File should win.
assertEquals(1, listResult.length);
assertFalse(listResult[0].isDir());
try {
// Trying to delete root/b/c would cause a dilemma for WASB, so
// it should throw.
fs.delete(new Path("/root/b/c"), true);
assertTrue("Should've thrown.", false);
} catch (AzureException e) {
assertEquals("File /root/b/c has a parent directory /root/b"
+ " which is also a file. Can't resolve.", e.getMessage());
}
}
private static enum DeepCreateTestVariation {
File, Folder
};
/**
* Tests that when we create the file (or folder) x/y/z, we also create
* explicit folder blobs for x and x/y
*/
@Test
public void testCreatingDeepFileCreatesExplicitFolder() throws Exception {
for (DeepCreateTestVariation variation : DeepCreateTestVariation.values()) {
switch (variation) {
case File:
assertTrue(fs.createNewFile(new Path("/x/y/z")));
break;
case Folder:
assertTrue(fs.mkdirs(new Path("/x/y/z")));
break;
}
assertTrue(backingStore
.exists(AzureBlobStorageTestAccount.toMockUri("x")));
assertTrue(backingStore.exists(AzureBlobStorageTestAccount
.toMockUri("x/y")));
fs.delete(new Path("/x"), true);
}
}
@Test
public void testSetPermissionOnImplicitFolder() throws Exception {
createEmptyBlobOutOfBand("root/b");
FsPermission newPermission = new FsPermission((short) 0600);
fs.setPermission(new Path("/root"), newPermission);
FileStatus newStatus = fs.getFileStatus(new Path("/root"));
assertNotNull(newStatus);
assertEquals(newPermission, newStatus.getPermission());
}
@Test
public void testSetOwnerOnImplicitFolder() throws Exception {
createEmptyBlobOutOfBand("root/b");
fs.setOwner(new Path("/root"), "newOwner", null);
FileStatus newStatus = fs.getFileStatus(new Path("/root"));
assertNotNull(newStatus);
assertEquals("newOwner", newStatus.getOwner());
}
}
| 5,875 | 32.577143 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package org.apache.hadoop.fs.azure;
import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.DEFAULT_STORAGE_EMULATOR_ACCOUNT_NAME;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Calendar;
import java.util.Date;
import java.util.EnumSet;
import java.util.GregorianCalendar;
import java.util.TimeZone;
import java.util.concurrent.ConcurrentLinkedQueue;
import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
import org.apache.hadoop.fs.azure.metrics.AzureFileSystemMetricsSystem;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import com.microsoft.azure.storage.AccessCondition;
import com.microsoft.azure.storage.CloudStorageAccount;
import com.microsoft.azure.storage.StorageCredentials;
import com.microsoft.azure.storage.StorageCredentialsAccountAndKey;
import com.microsoft.azure.storage.StorageCredentialsAnonymous;
import com.microsoft.azure.storage.blob.BlobContainerPermissions;
import com.microsoft.azure.storage.blob.BlobContainerPublicAccessType;
import com.microsoft.azure.storage.blob.BlobOutputStream;
import com.microsoft.azure.storage.blob.CloudBlobClient;
import com.microsoft.azure.storage.blob.CloudBlobContainer;
import com.microsoft.azure.storage.blob.CloudBlockBlob;
import com.microsoft.azure.storage.blob.SharedAccessBlobPermissions;
import com.microsoft.azure.storage.blob.SharedAccessBlobPolicy;
import com.microsoft.azure.storage.core.Base64;
/**
* Helper class to create WASB file systems backed by either a mock in-memory
* implementation or a real Azure Storage account. See RunningLiveWasbTests.txt
* for instructions on how to connect to a real Azure Storage account.
*/
public final class AzureBlobStorageTestAccount {
private static final String ACCOUNT_KEY_PROPERTY_NAME = "fs.azure.account.key.";
private static final String SAS_PROPERTY_NAME = "fs.azure.sas.";
private static final String TEST_CONFIGURATION_FILE_NAME = "azure-test.xml";
private static final String TEST_ACCOUNT_NAME_PROPERTY_NAME = "fs.azure.test.account.name";
public static final String MOCK_ACCOUNT_NAME = "mockAccount.blob.core.windows.net";
public static final String MOCK_CONTAINER_NAME = "mockContainer";
public static final String WASB_AUTHORITY_DELIMITER = "@";
public static final String WASB_SCHEME = "wasb";
public static final String PATH_DELIMITER = "/";
public static final String AZURE_ROOT_CONTAINER = "$root";
public static final String MOCK_WASB_URI = "wasb://" + MOCK_CONTAINER_NAME
+ WASB_AUTHORITY_DELIMITER + MOCK_ACCOUNT_NAME + "/";
private static final String USE_EMULATOR_PROPERTY_NAME = "fs.azure.test.emulator";
private static final String KEY_DISABLE_THROTTLING = "fs.azure.disable.bandwidth.throttling";
private static final String KEY_READ_TOLERATE_CONCURRENT_APPEND = "fs.azure.io.read.tolerate.concurrent.append";
public static final String DEFAULT_PAGE_BLOB_DIRECTORY = "pageBlobs";
public static final String DEFAULT_ATOMIC_RENAME_DIRECTORIES = "/atomicRenameDir1,/atomicRenameDir2";
private CloudStorageAccount account;
private CloudBlobContainer container;
private CloudBlockBlob blob;
private NativeAzureFileSystem fs;
private AzureNativeFileSystemStore storage;
private MockStorageInterface mockStorage;
private String pageBlobDirectory;
private static final ConcurrentLinkedQueue<MetricsRecord> allMetrics =
new ConcurrentLinkedQueue<MetricsRecord>();
private static boolean metricsConfigSaved = false;
private AzureBlobStorageTestAccount(NativeAzureFileSystem fs,
CloudStorageAccount account,
CloudBlobContainer container) {
this.account = account;
this.container = container;
this.fs = fs;
}
/**
* Create a test account with an initialized storage reference.
*
* @param storage
* -- store to be accessed by the account
* @param account
* -- Windows Azure account object
* @param container
* -- Windows Azure container object
*/
private AzureBlobStorageTestAccount(AzureNativeFileSystemStore storage,
CloudStorageAccount account, CloudBlobContainer container) {
this.account = account;
this.container = container;
this.storage = storage;
}
/**
* Create a test account sessions with the default root container.
*
* @param fs
* - file system, namely WASB file system
* @param account
* - Windows Azure account object
* @param blob
* - block blob reference
*/
private AzureBlobStorageTestAccount(NativeAzureFileSystem fs,
CloudStorageAccount account, CloudBlockBlob blob) {
this.account = account;
this.blob = blob;
this.fs = fs;
}
private AzureBlobStorageTestAccount(NativeAzureFileSystem fs,
MockStorageInterface mockStorage) {
this.fs = fs;
this.mockStorage = mockStorage;
}
private static void addRecord(MetricsRecord record) {
allMetrics.add(record);
}
public static String getMockContainerUri() {
return String.format("http://%s/%s",
AzureBlobStorageTestAccount.MOCK_ACCOUNT_NAME,
AzureBlobStorageTestAccount.MOCK_CONTAINER_NAME);
}
public static String toMockUri(String path) {
return String.format("http://%s/%s/%s",
AzureBlobStorageTestAccount.MOCK_ACCOUNT_NAME,
AzureBlobStorageTestAccount.MOCK_CONTAINER_NAME, path);
}
public static String toMockUri(Path path) {
// Remove the first SEPARATOR
return toMockUri(path.toUri().getRawPath().substring(1));
}
public static Path pageBlobPath() {
return new Path("/" + DEFAULT_PAGE_BLOB_DIRECTORY);
}
public static Path pageBlobPath(String fileName) {
return new Path(pageBlobPath(), fileName);
}
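  // Example (for reference): with the default page blob directory above,
  // pageBlobPath("data.bin") resolves to "/pageBlobs/data.bin".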
public Number getLatestMetricValue(String metricName, Number defaultValue)
      throws IndexOutOfBoundsException {
boolean found = false;
Number ret = null;
for (MetricsRecord currentRecord : allMetrics) {
// First check if this record is coming for my file system.
if (wasGeneratedByMe(currentRecord)) {
for (AbstractMetric currentMetric : currentRecord.metrics()) {
if (currentMetric.name().equalsIgnoreCase(metricName)) {
found = true;
ret = currentMetric.value();
break;
}
}
}
}
if (!found) {
if (defaultValue != null) {
return defaultValue;
}
throw new IndexOutOfBoundsException(metricName);
}
return ret;
}
/**
* Checks if the given record was generated by my WASB file system instance.
* @param currentRecord The metrics record to check.
   * @return true if the record was generated by this file system instance.
*/
private boolean wasGeneratedByMe(MetricsRecord currentRecord) {
String myFsId = fs.getInstrumentation().getFileSystemInstanceId().toString();
for (MetricsTag currentTag : currentRecord.tags()) {
if (currentTag.name().equalsIgnoreCase("wasbFileSystemId")) {
return currentTag.value().equals(myFsId);
}
}
return false;
}
/**
* Gets the blob reference to the given blob key.
*
* @param blobKey
* The blob key (no initial slash).
* @return The blob reference.
*/
public CloudBlockBlob getBlobReference(String blobKey)
throws Exception {
    return container.getBlockBlobReference(blobKey);
}
/**
* Acquires a short lease on the given blob in this test account.
*
* @param blobKey
* The key to the blob (no initial slash).
* @return The lease ID.
*/
public String acquireShortLease(String blobKey) throws Exception {
return getBlobReference(blobKey).acquireLease(60, null);
}
/**
* Releases the lease on the container.
*
* @param leaseID
* The lease ID.
*/
public void releaseLease(String leaseID, String blobKey) throws Exception {
AccessCondition accessCondition = new AccessCondition();
accessCondition.setLeaseID(leaseID);
getBlobReference(blobKey).releaseLease(accessCondition);
}
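  // Usage sketch (assumed, based on the two methods above): acquire a short
  // lease on a blob and release it again, e.g.:
  //   String leaseId = testAccount.acquireShortLease("file");
  //   testAccount.releaseLease(leaseId, "file");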
private static void saveMetricsConfigFile() {
if (!metricsConfigSaved) {
new org.apache.hadoop.metrics2.impl.ConfigBuilder()
.add("azure-file-system.sink.azuretestcollector.class",
StandardCollector.class.getName())
.save("hadoop-metrics2-azure-file-system.properties");
metricsConfigSaved = true;
}
}
public static AzureBlobStorageTestAccount createMock() throws Exception {
return createMock(new Configuration());
}
public static AzureBlobStorageTestAccount createMock(Configuration conf) throws Exception {
saveMetricsConfigFile();
configurePageBlobDir(conf);
configureAtomicRenameDir(conf);
AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
MockStorageInterface mockStorage = new MockStorageInterface();
store.setAzureStorageInteractionLayer(mockStorage);
NativeAzureFileSystem fs = new NativeAzureFileSystem(store);
setMockAccountKey(conf);
// register the fs provider.
fs.initialize(new URI(MOCK_WASB_URI), conf);
AzureBlobStorageTestAccount testAcct =
new AzureBlobStorageTestAccount(fs, mockStorage);
return testAcct;
}
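  // Typical usage sketch (grounded in the tests in this module, e.g.
  // TestOutOfBandAzureBlobOperations): obtain a mock-backed file system in
  // setUp() and clean it up in tearDown():
  //   AzureBlobStorageTestAccount acct = AzureBlobStorageTestAccount.createMock();
  //   NativeAzureFileSystem fs = acct.getFileSystem();
  //   // ... exercise fs ...
  //   acct.cleanup();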
/**
* Set the page blob directories configuration to the default if it is not
* already set. Some tests may set it differently (e.g. the page blob
* tests in TestNativeAzureFSPageBlobLive).
* @param conf The configuration to conditionally update.
*/
private static void configurePageBlobDir(Configuration conf) {
if (conf.get(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES) == null) {
conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES,
"/" + DEFAULT_PAGE_BLOB_DIRECTORY);
}
}
/** Do the same for the atomic rename directories configuration */
private static void configureAtomicRenameDir(Configuration conf) {
if (conf.get(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES) == null) {
conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES,
DEFAULT_ATOMIC_RENAME_DIRECTORIES);
}
}
/**
* Creates a test account that goes against the storage emulator.
*
* @return The test account, or null if the emulator isn't setup.
*/
public static AzureBlobStorageTestAccount createForEmulator()
throws Exception {
saveMetricsConfigFile();
NativeAzureFileSystem fs = null;
CloudBlobContainer container = null;
Configuration conf = createTestConfiguration();
if (!conf.getBoolean(USE_EMULATOR_PROPERTY_NAME, false)) {
// Not configured to test against the storage emulator.
System.out
.println("Skipping emulator Azure test because configuration " +
"doesn't indicate that it's running." +
" Please see RunningLiveWasbTests.txt for guidance.");
return null;
}
CloudStorageAccount account =
CloudStorageAccount.getDevelopmentStorageAccount();
fs = new NativeAzureFileSystem();
String containerName = String.format("wasbtests-%s-%tQ",
System.getProperty("user.name"), new Date());
container = account.createCloudBlobClient().getContainerReference(
containerName);
container.create();
// Set account URI and initialize Azure file system.
URI accountUri = createAccountUri(DEFAULT_STORAGE_EMULATOR_ACCOUNT_NAME,
containerName);
fs.initialize(accountUri, conf);
// Create test account initializing the appropriate member variables.
//
AzureBlobStorageTestAccount testAcct =
new AzureBlobStorageTestAccount(fs, account, container);
return testAcct;
}
public static AzureBlobStorageTestAccount createOutOfBandStore(
int uploadBlockSize, int downloadBlockSize) throws Exception {
saveMetricsConfigFile();
CloudBlobContainer container = null;
Configuration conf = createTestConfiguration();
CloudStorageAccount account = createTestAccount(conf);
if (null == account) {
return null;
}
String containerName = String.format("wasbtests-%s-%tQ",
System.getProperty("user.name"), new Date());
// Create the container.
container = account.createCloudBlobClient().getContainerReference(
containerName);
container.create();
String accountName = conf.get(TEST_ACCOUNT_NAME_PROPERTY_NAME);
    // Ensure that custom throttling is disabled and that concurrent
    // out-of-band appends are tolerated.
conf.setBoolean(KEY_DISABLE_THROTTLING, true);
conf.setBoolean(KEY_READ_TOLERATE_CONCURRENT_APPEND, true);
// Set account URI and initialize Azure file system.
URI accountUri = createAccountUri(accountName, containerName);
// Set up instrumentation.
//
AzureFileSystemMetricsSystem.fileSystemStarted();
String sourceName = NativeAzureFileSystem.newMetricsSourceName();
String sourceDesc = "Azure Storage Volume File System metrics";
AzureFileSystemInstrumentation instrumentation = new AzureFileSystemInstrumentation(conf);
AzureFileSystemMetricsSystem.registerSource(
sourceName, sourceDesc, instrumentation);
// Create a new AzureNativeFileSystemStore object.
AzureNativeFileSystemStore testStorage = new AzureNativeFileSystemStore();
// Initialize the store with the throttling feedback interfaces.
testStorage.initialize(accountUri, conf, instrumentation);
// Create test account initializing the appropriate member variables.
//
AzureBlobStorageTestAccount testAcct =
new AzureBlobStorageTestAccount(testStorage, account, container);
return testAcct;
}
/**
* Sets the mock account key in the given configuration.
*
* @param conf
* The configuration.
*/
public static void setMockAccountKey(Configuration conf) {
setMockAccountKey(conf, MOCK_ACCOUNT_NAME);
}
/**
* Sets the mock account key in the given configuration.
*
* @param conf
* The configuration.
*/
public static void setMockAccountKey(Configuration conf, String accountName) {
conf.set(ACCOUNT_KEY_PROPERTY_NAME + accountName,
Base64.encode(new byte[] { 1, 2, 3 }));
}
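  // Effect (for reference): with the defaults above this amounts to setting
  //   fs.azure.account.key.mockAccount.blob.core.windows.net = <base64 of {1, 2, 3}>
  // in the configuration, which is enough for the mock-backed file system to
  // initialize without contacting real storage.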
private static URI createAccountUri(String accountName)
throws URISyntaxException {
return new URI(WASB_SCHEME + ":" + PATH_DELIMITER + PATH_DELIMITER
+ accountName);
}
private static URI createAccountUri(String accountName, String containerName)
throws URISyntaxException {
return new URI(WASB_SCHEME + ":" + PATH_DELIMITER + PATH_DELIMITER
+ containerName + WASB_AUTHORITY_DELIMITER + accountName);
}
public static AzureBlobStorageTestAccount create() throws Exception {
return create("");
}
public static AzureBlobStorageTestAccount create(String containerNameSuffix)
throws Exception {
return create(containerNameSuffix,
EnumSet.of(CreateOptions.CreateContainer));
}
// Create a test account which uses throttling.
public static AzureBlobStorageTestAccount createThrottled() throws Exception {
return create("",
EnumSet.of(CreateOptions.useThrottling, CreateOptions.CreateContainer));
}
public static AzureBlobStorageTestAccount create(Configuration conf)
throws Exception {
return create("", EnumSet.of(CreateOptions.CreateContainer), conf);
}
static CloudStorageAccount createStorageAccount(String accountName,
Configuration conf, boolean allowAnonymous) throws URISyntaxException,
KeyProviderException {
String accountKey = AzureNativeFileSystemStore
.getAccountKeyFromConfiguration(accountName, conf);
StorageCredentials credentials;
if (accountKey == null && allowAnonymous) {
credentials = StorageCredentialsAnonymous.ANONYMOUS;
} else {
credentials = new StorageCredentialsAccountAndKey(
accountName.split("\\.")[0], accountKey);
}
if (credentials == null) {
return null;
} else {
return new CloudStorageAccount(credentials);
}
}
public static Configuration createTestConfiguration() {
return createTestConfiguration(null);
}
private static Configuration createTestConfiguration(Configuration conf) {
if (conf == null) {
conf = new Configuration();
}
conf.addResource(TEST_CONFIGURATION_FILE_NAME);
return conf;
}
public static CloudStorageAccount createTestAccount()
throws URISyntaxException, KeyProviderException
{
return createTestAccount(createTestConfiguration());
}
static CloudStorageAccount createTestAccount(Configuration conf)
throws URISyntaxException, KeyProviderException {
String testAccountName = conf.get(TEST_ACCOUNT_NAME_PROPERTY_NAME);
if (testAccountName == null) {
System.out
.println("Skipping live Azure test because of missing test account." +
" Please see RunningLiveWasbTests.txt for guidance.");
return null;
}
return createStorageAccount(testAccountName, conf, false);
}
public static enum CreateOptions {
UseSas, Readonly, CreateContainer, useThrottling
}
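  // Usage sketch (see TestContainerChecks in this module): options are passed
  // as an EnumSet, e.g. a SAS-backed account without a pre-created container:
  //   AzureBlobStorageTestAccount.create("", EnumSet.of(CreateOptions.UseSas));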
public static AzureBlobStorageTestAccount create(String containerNameSuffix,
EnumSet<CreateOptions> createOptions) throws Exception {
return create(containerNameSuffix, createOptions, null);
}
public static AzureBlobStorageTestAccount create(String containerNameSuffix,
EnumSet<CreateOptions> createOptions, Configuration initialConfiguration)
throws Exception {
saveMetricsConfigFile();
NativeAzureFileSystem fs = null;
CloudBlobContainer container = null;
Configuration conf = createTestConfiguration(initialConfiguration);
configurePageBlobDir(conf);
configureAtomicRenameDir(conf);
CloudStorageAccount account = createTestAccount(conf);
if (account == null) {
return null;
}
fs = new NativeAzureFileSystem();
String containerName = String.format("wasbtests-%s-%tQ%s",
System.getProperty("user.name"), new Date(), containerNameSuffix);
container = account.createCloudBlobClient().getContainerReference(
containerName);
if (createOptions.contains(CreateOptions.CreateContainer)) {
container.create();
}
String accountName = conf.get(TEST_ACCOUNT_NAME_PROPERTY_NAME);
if (createOptions.contains(CreateOptions.UseSas)) {
String sas = generateSAS(container,
createOptions.contains(CreateOptions.Readonly));
if (!createOptions.contains(CreateOptions.CreateContainer)) {
// The caller doesn't want the container to be pre-created,
// so delete it now that we have generated the SAS.
container.delete();
}
// Remove the account key from the configuration to make sure we don't
// cheat and use that.
conf.set(ACCOUNT_KEY_PROPERTY_NAME + accountName, "");
// Set the SAS key.
conf.set(SAS_PROPERTY_NAME + containerName + "." + accountName, sas);
}
// Check if throttling is turned on and set throttling parameters
// appropriately.
if (createOptions.contains(CreateOptions.useThrottling)) {
conf.setBoolean(KEY_DISABLE_THROTTLING, false);
} else {
conf.setBoolean(KEY_DISABLE_THROTTLING, true);
}
// Set account URI and initialize Azure file system.
URI accountUri = createAccountUri(accountName, containerName);
fs.initialize(accountUri, conf);
// Create test account initializing the appropriate member variables.
//
AzureBlobStorageTestAccount testAcct =
new AzureBlobStorageTestAccount(fs, account, container);
return testAcct;
}
private static String generateContainerName() throws Exception {
String containerName =
String.format ("wasbtests-%s-%tQ",
System.getProperty("user.name"),
new Date());
return containerName;
}
private static String generateSAS(CloudBlobContainer container,
boolean readonly) throws Exception {
// Create a container if it does not exist.
container.createIfNotExists();
// Create a new shared access policy.
SharedAccessBlobPolicy sasPolicy = new SharedAccessBlobPolicy();
// Create a UTC Gregorian calendar value.
GregorianCalendar calendar = new GregorianCalendar(
TimeZone.getTimeZone("UTC"));
// Specify the current time as the start time for the shared access
// signature.
//
calendar.setTime(new Date());
sasPolicy.setSharedAccessStartTime(calendar.getTime());
// Use the start time delta one hour as the end time for the shared
// access signature.
calendar.add(Calendar.HOUR, 10);
sasPolicy.setSharedAccessExpiryTime(calendar.getTime());
if (readonly) {
// Set READ permissions
sasPolicy.setPermissions(EnumSet.of(
SharedAccessBlobPermissions.READ,
SharedAccessBlobPermissions.LIST));
} else {
// Set READ and WRITE permissions.
//
sasPolicy.setPermissions(EnumSet.of(
SharedAccessBlobPermissions.READ,
SharedAccessBlobPermissions.WRITE,
SharedAccessBlobPermissions.LIST));
}
// Create the container permissions.
BlobContainerPermissions containerPermissions = new BlobContainerPermissions();
// Turn public access to the container off.
containerPermissions.setPublicAccess(BlobContainerPublicAccessType.OFF);
container.uploadPermissions(containerPermissions);
// Create a shared access signature for the container.
String sas = container.generateSharedAccessSignature(sasPolicy, null);
// HACK: when the just generated SAS is used straight away, we get an
// authorization error intermittently. Sleeping for 1.5 seconds fixes that
// on my box.
Thread.sleep(1500);
// Return to caller with the shared access signature.
return sas;
}
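  // Note (editorial): the SAS returned here is wired into the configuration by
  // create(...) above via the "fs.azure.sas.<container>.<account>" property,
  // while the account key is blanked out, so the file system authenticates
  // with the SAS rather than the key.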
public static void primePublicContainer(CloudBlobClient blobClient,
String accountName, String containerName, String blobName, int fileSize)
throws Exception {
// Create a container if it does not exist. The container name
// must be lower case.
CloudBlobContainer container = blobClient
.getContainerReference(containerName);
container.createIfNotExists();
// Create a new shared access policy.
SharedAccessBlobPolicy sasPolicy = new SharedAccessBlobPolicy();
// Set READ and WRITE permissions.
//
sasPolicy.setPermissions(EnumSet.of(
SharedAccessBlobPermissions.READ,
SharedAccessBlobPermissions.WRITE,
SharedAccessBlobPermissions.LIST,
SharedAccessBlobPermissions.DELETE));
// Create the container permissions.
BlobContainerPermissions containerPermissions = new BlobContainerPermissions();
// Turn public access to the container off.
containerPermissions
.setPublicAccess(BlobContainerPublicAccessType.CONTAINER);
// Set the policy using the values set above.
containerPermissions.getSharedAccessPolicies().put("testwasbpolicy",
sasPolicy);
container.uploadPermissions(containerPermissions);
// Create a blob output stream.
CloudBlockBlob blob = container.getBlockBlobReference(blobName);
BlobOutputStream outputStream = blob.openOutputStream();
outputStream.write(new byte[fileSize]);
outputStream.close();
}
public static AzureBlobStorageTestAccount createAnonymous(
final String blobName, final int fileSize) throws Exception {
NativeAzureFileSystem fs = null;
CloudBlobContainer container = null;
Configuration conf = createTestConfiguration(), noTestAccountConf = new Configuration();
// Set up a session with the cloud blob client to generate SAS and check the
// existence of a container and capture the container object.
CloudStorageAccount account = createTestAccount(conf);
if (account == null) {
return null;
}
CloudBlobClient blobClient = account.createCloudBlobClient();
// Capture the account URL and the account name.
String accountName = conf.get(TEST_ACCOUNT_NAME_PROPERTY_NAME);
// Generate a container name and create a shared access signature string for
// it.
//
String containerName = generateContainerName();
// Set up public container with the specified blob name.
primePublicContainer(blobClient, accountName, containerName, blobName,
fileSize);
// Capture the blob container object. It should exist after generating the
// shared access signature.
container = blobClient.getContainerReference(containerName);
if (null == container || !container.exists()) {
      final String errMsg = String
          .format("Container '%s' expected but not found while creating SAS account.",
              containerName);
throw new Exception(errMsg);
}
// Set the account URI.
URI accountUri = createAccountUri(accountName, containerName);
// Initialize the Native Azure file system with anonymous credentials.
fs = new NativeAzureFileSystem();
fs.initialize(accountUri, noTestAccountConf);
// Create test account initializing the appropriate member variables.
AzureBlobStorageTestAccount testAcct = new AzureBlobStorageTestAccount(fs,
account, container);
// Return to caller with test account.
return testAcct;
}
private static CloudBlockBlob primeRootContainer(CloudBlobClient blobClient,
String accountName, String blobName, int fileSize) throws Exception {
// Create a container if it does not exist. The container name
// must be lower case.
CloudBlobContainer container = blobClient.getContainerReference("https://"
+ accountName + "/" + "$root");
container.createIfNotExists();
// Create a blob output stream.
CloudBlockBlob blob = container.getBlockBlobReference(blobName);
BlobOutputStream outputStream = blob.openOutputStream();
outputStream.write(new byte[fileSize]);
outputStream.close();
// Return a reference to the block blob object.
return blob;
}
public static AzureBlobStorageTestAccount createRoot(final String blobName,
final int fileSize) throws Exception {
NativeAzureFileSystem fs = null;
CloudBlobContainer container = null;
Configuration conf = createTestConfiguration();
// Set up a session with the cloud blob client to generate SAS and check the
// existence of a container and capture the container object.
CloudStorageAccount account = createTestAccount(conf);
if (account == null) {
return null;
}
CloudBlobClient blobClient = account.createCloudBlobClient();
// Capture the account URL and the account name.
String accountName = conf.get(TEST_ACCOUNT_NAME_PROPERTY_NAME);
// Set up public container with the specified blob name.
CloudBlockBlob blobRoot = primeRootContainer(blobClient, accountName,
blobName, fileSize);
// Capture the blob container object. It should exist after generating the
// shared access signature.
container = blobClient.getContainerReference(AZURE_ROOT_CONTAINER);
if (null == container || !container.exists()) {
      final String errMsg = String
          .format("Container '%s' expected but not found while creating SAS account.",
              AZURE_ROOT_CONTAINER);
throw new Exception(errMsg);
}
// Set the account URI without a container name.
URI accountUri = createAccountUri(accountName);
// Initialize the Native Azure file system with anonymous credentials.
fs = new NativeAzureFileSystem();
fs.initialize(accountUri, conf);
// Create test account initializing the appropriate member variables.
// Set the container value to null for the default root container.
//
AzureBlobStorageTestAccount testAcct = new AzureBlobStorageTestAccount(
fs, account, blobRoot);
// Return to caller with test account.
return testAcct;
}
public void closeFileSystem() throws Exception {
if (fs != null) {
fs.close();
}
}
public void cleanup() throws Exception {
if (fs != null) {
fs.close();
fs = null;
}
if (container != null) {
container.deleteIfExists();
container = null;
}
if (blob != null) {
// The blob member variable is set for blobs under root containers.
// Delete blob objects created for root container tests when cleaning
// up the test account.
blob.delete();
blob = null;
}
}
public NativeAzureFileSystem getFileSystem() {
return fs;
}
public AzureNativeFileSystemStore getStore() {
return this.storage;
}
/**
* Gets the real blob container backing this account if it's not a mock.
*
* @return A container, or null if it's a mock.
*/
public CloudBlobContainer getRealContainer() {
return container;
}
/**
* Gets the real blob account backing this account if it's not a mock.
*
* @return An account, or null if it's a mock.
*/
public CloudStorageAccount getRealAccount() {
return account;
}
/**
* Gets the mock storage interface if this account is backed by a mock.
*
* @return The mock storage, or null if it's backed by a real account.
*/
public MockStorageInterface getMockStorage() {
return mockStorage;
}
public static class StandardCollector implements MetricsSink {
@Override
public void init(SubsetConfiguration conf) {
}
@Override
public void putMetrics(MetricsRecord record) {
addRecord(record);
}
@Override
public void flush() {
}
}
public void setPageBlobDirectory(String directory) {
this.pageBlobDirectory = directory;
}
public String getPageBlobDirectory() {
return pageBlobDirectory;
}
}
| 30,823 | 34.147092 | 114 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
public class TestNativeAzureFileSystemContractEmulator extends
FileSystemContractBaseTest {
private AzureBlobStorageTestAccount testAccount;
@Override
protected void setUp() throws Exception {
testAccount = AzureBlobStorageTestAccount.createForEmulator();
if (testAccount != null) {
fs = testAccount.getFileSystem();
}
}
@Override
protected void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
fs = null;
}
}
@Override
protected void runTest() throws Throwable {
if (testAccount != null) {
super.runTest();
}
}
}
| 1,552 | 29.45098 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_CHECK_BLOCK_MD5;
import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_STORE_BLOB_MD5;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeNotNull;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext;
import org.junit.After;
import org.junit.Test;
import com.microsoft.azure.storage.Constants;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.ResponseReceivedEvent;
import com.microsoft.azure.storage.StorageErrorCodeStrings;
import com.microsoft.azure.storage.StorageEvent;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.BlockEntry;
import com.microsoft.azure.storage.blob.BlockSearchMode;
import com.microsoft.azure.storage.blob.CloudBlockBlob;
import com.microsoft.azure.storage.core.Base64;
/**
* Test that we do proper data integrity validation with MD5 checks as
* configured.
*/
public class TestBlobDataValidation {
private AzureBlobStorageTestAccount testAccount;
@After
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
}
}
/**
* Test that by default we don't store the blob-level MD5.
*/
@Test
public void testBlobMd5StoreOffByDefault() throws Exception {
testAccount = AzureBlobStorageTestAccount.create();
testStoreBlobMd5(false);
}
/**
* Test that we get blob-level MD5 storage and validation if we specify that
* in the configuration.
*/
@Test
public void testStoreBlobMd5() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean(KEY_STORE_BLOB_MD5, true);
testAccount = AzureBlobStorageTestAccount.create(conf);
testStoreBlobMd5(true);
}
private void testStoreBlobMd5(boolean expectMd5Stored) throws Exception {
assumeNotNull(testAccount);
// Write a test file.
String testFileKey = "testFile";
Path testFilePath = new Path("/" + testFileKey);
OutputStream outStream = testAccount.getFileSystem().create(testFilePath);
outStream.write(new byte[] { 5, 15 });
outStream.close();
// Check that we stored/didn't store the MD5 field as configured.
CloudBlockBlob blob = testAccount.getBlobReference(testFileKey);
blob.downloadAttributes();
String obtainedMd5 = blob.getProperties().getContentMD5();
if (expectMd5Stored) {
assertNotNull(obtainedMd5);
} else {
assertNull("Expected no MD5, found: " + obtainedMd5, obtainedMd5);
}
// Mess with the content so it doesn't match the MD5.
String newBlockId = Base64.encode(new byte[] { 55, 44, 33, 22 });
blob.uploadBlock(newBlockId,
new ByteArrayInputStream(new byte[] { 6, 45 }), 2);
blob.commitBlockList(Arrays.asList(new BlockEntry[] { new BlockEntry(
newBlockId, BlockSearchMode.UNCOMMITTED) }));
// Now read back the content. If we stored the MD5 for the blob content
// we should get a data corruption error.
InputStream inStream = testAccount.getFileSystem().open(testFilePath);
try {
byte[] inBuf = new byte[100];
while (inStream.read(inBuf) > 0){
//nothing;
}
inStream.close();
if (expectMd5Stored) {
fail("Should've thrown because of data corruption.");
}
} catch (IOException ex) {
if (!expectMd5Stored) {
throw ex;
}
StorageException cause = (StorageException)ex.getCause();
assertNotNull(cause);
assertTrue("Unexpected cause: " + cause,
cause.getErrorCode().equals(StorageErrorCodeStrings.INVALID_MD5));
}
}
/**
* Test that by default we check block-level MD5.
*/
@Test
public void testCheckBlockMd5() throws Exception {
testAccount = AzureBlobStorageTestAccount.create();
testCheckBlockMd5(true);
}
/**
* Test that we don't check block-level MD5 if we specify that in the
* configuration.
*/
@Test
public void testDontCheckBlockMd5() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean(KEY_CHECK_BLOCK_MD5, false);
testAccount = AzureBlobStorageTestAccount.create(conf);
testCheckBlockMd5(false);
}
/**
   * Connection inspector to check that the content MD5 field is set or not set
   * on requests and responses as expected.
*/
private static class ContentMD5Checker extends
StorageEvent<ResponseReceivedEvent> {
private final boolean expectMd5;
public ContentMD5Checker(boolean expectMd5) {
this.expectMd5 = expectMd5;
}
@Override
public void eventOccurred(ResponseReceivedEvent eventArg) {
HttpURLConnection connection = (HttpURLConnection) eventArg
.getConnectionObject();
if (isGetRange(connection)) {
checkObtainedMd5(connection
.getHeaderField(Constants.HeaderConstants.CONTENT_MD5));
} else if (isPutBlock(connection)) {
checkObtainedMd5(connection
.getRequestProperty(Constants.HeaderConstants.CONTENT_MD5));
}
}
private void checkObtainedMd5(String obtainedMd5) {
if (expectMd5) {
assertNotNull(obtainedMd5);
} else {
assertNull("Expected no MD5, found: " + obtainedMd5, obtainedMd5);
}
}
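    // The two helpers below identify the relevant storage operations by their
    // HTTP signature: a Put Block request is a PUT with a "blockid" query
    // parameter, and a ranged read is a GET carrying the storage range header.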
private static boolean isPutBlock(HttpURLConnection connection) {
return connection.getRequestMethod().equals("PUT")
&& connection.getURL().getQuery() != null
&& connection.getURL().getQuery().contains("blockid");
}
private static boolean isGetRange(HttpURLConnection connection) {
return connection.getRequestMethod().equals("GET")
&& connection
.getHeaderField(Constants.HeaderConstants.STORAGE_RANGE_HEADER) != null;
}
}
private void testCheckBlockMd5(final boolean expectMd5Checked)
throws Exception {
assumeNotNull(testAccount);
Path testFilePath = new Path("/testFile");
// Add a hook to check that for GET/PUT requests we set/don't set
// the block-level MD5 field as configured. I tried to do clever
// testing by also messing with the raw data to see if we actually
// validate the data as expected, but the HttpURLConnection wasn't
// pluggable enough for me to do that.
testAccount.getFileSystem().getStore()
.addTestHookToOperationContext(new TestHookOperationContext() {
@Override
public OperationContext modifyOperationContext(
OperationContext original) {
original.getResponseReceivedEventHandler().addListener(
new ContentMD5Checker(expectMd5Checked));
return original;
}
});
OutputStream outStream = testAccount.getFileSystem().create(testFilePath);
outStream.write(new byte[] { 5, 15 });
outStream.close();
InputStream inStream = testAccount.getFileSystem().open(testFilePath);
byte[] inBuf = new byte[100];
while (inStream.read(inBuf) > 0){
//nothing;
}
inStream.close();
}
}
| 8,291 | 33.840336 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeNotNull;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.AzureException;
import org.apache.hadoop.util.Time;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Write data into a page blob and verify you can read back all of it
* or just a part of it.
*/
public class TestReadAndSeekPageBlobAfterWrite {
private static final Log LOG = LogFactory.getLog(TestReadAndSeekPageBlobAfterWrite.class);
private FileSystem fs;
private AzureBlobStorageTestAccount testAccount;
private byte[] randomData;
// Page blob physical page size
private static final int PAGE_SIZE = PageBlobFormatHelpers.PAGE_SIZE;
// Size of data on page (excluding header)
private static final int PAGE_DATA_SIZE = PAGE_SIZE - PageBlobFormatHelpers.PAGE_HEADER_SIZE;
private static final int MAX_BYTES = 33554432; // maximum bytes in a file that we'll test
private static final int MAX_PAGES = MAX_BYTES / PAGE_SIZE; // maximum number of pages we'll test
private Random rand = new Random();
// A key with a prefix under /pageBlobs, which for the test file system will
// force use of a page blob.
private static final String KEY = "/pageBlobs/file.dat";
private static final Path PATH = new Path(KEY); // path of page blob file to read and write
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.create();
}
@Before
public void setUp() throws Exception {
testAccount = createTestAccount();
if (testAccount != null) {
fs = testAccount.getFileSystem();
}
assumeNotNull(testAccount);
// Make sure we are using an integral number of pages.
assertEquals(0, MAX_BYTES % PAGE_SIZE);
// load an in-memory array of random data
randomData = new byte[PAGE_SIZE * MAX_PAGES];
rand.nextBytes(randomData);
}
@After
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
fs = null;
}
}
/**
* Make sure the file name (key) is a page blob file name. If anybody changes that,
* we need to come back and update this test class.
*/
@Test
public void testIsPageBlobFileName() {
AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore();
String[] a = KEY.split("/");
String key2 = a[1] + "/";
assertTrue(store.isPageBlobKey(key2));
}
/**
* For a set of different file sizes, write some random data to a page blob,
* read it back, and compare that what was read is the same as what was written.
*/
@Test
public void testReadAfterWriteRandomData() throws IOException {
// local shorthand
final int PDS = PAGE_DATA_SIZE;
// Test for sizes at and near page boundaries
int[] dataSizes = {
// on first page
0, 1, 2, 3,
// Near first physical page boundary (because the implementation
// stores PDS + the page header size bytes on each page).
PDS - 1, PDS, PDS + 1, PDS + 2, PDS + 3,
// near second physical page boundary
(2 * PDS) - 1, (2 * PDS), (2 * PDS) + 1, (2 * PDS) + 2, (2 * PDS) + 3,
// near tenth physical page boundary
(10 * PDS) - 1, (10 * PDS), (10 * PDS) + 1, (10 * PDS) + 2, (10 * PDS) + 3,
// test one big size, >> 4MB (an internal buffer size in the code)
MAX_BYTES
};
for (int i : dataSizes) {
testReadAfterWriteRandomData(i);
}
}
private void testReadAfterWriteRandomData(int size) throws IOException {
writeRandomData(size);
readRandomDataAndVerify(size);
}
/**
* Read "size" bytes of data and verify that what was read and what was written
* are the same.
*/
private void readRandomDataAndVerify(int size) throws AzureException, IOException {
byte[] b = new byte[size];
FSDataInputStream stream = fs.open(PATH);
int bytesRead = stream.read(b);
stream.close();
    assertEquals(size, bytesRead);
// compare the data read to the data written
assertTrue(comparePrefix(randomData, b, size));
}
// return true if the beginning "size" values of the arrays are the same
private boolean comparePrefix(byte[] a, byte[] b, int size) {
if (a.length < size || b.length < size) {
return false;
}
for (int i = 0; i < size; i++) {
if (a[i] != b[i]) {
return false;
}
}
return true;
}
// Write a specified amount of random data to the file path for this test class.
private void writeRandomData(int size) throws IOException {
OutputStream output = fs.create(PATH);
output.write(randomData, 0, size);
output.close();
}
/**
* Write data to a page blob, open it, seek, and then read a range of data.
* Then compare that the data read from that range is the same as the data originally written.
*/
@Test
public void testPageBlobSeekAndReadAfterWrite() throws IOException {
writeRandomData(PAGE_SIZE * MAX_PAGES);
int recordSize = 100;
byte[] b = new byte[recordSize];
FSDataInputStream stream = fs.open(PATH);
// Seek to a boundary around the middle of the 6th page
int seekPosition = 5 * PAGE_SIZE + 250;
stream.seek(seekPosition);
// Read a record's worth of bytes and verify results
int bytesRead = stream.read(b);
verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
// Seek to another spot and read a record greater than a page
seekPosition = 10 * PAGE_SIZE + 250;
stream.seek(seekPosition);
recordSize = 1000;
b = new byte[recordSize];
bytesRead = stream.read(b);
verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
// Read the last 100 bytes of the file
recordSize = 100;
seekPosition = PAGE_SIZE * MAX_PAGES - recordSize;
stream.seek(seekPosition);
b = new byte[recordSize];
bytesRead = stream.read(b);
verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
// Read past the end of the file and we should get only partial data.
recordSize = 100;
seekPosition = PAGE_SIZE * MAX_PAGES - recordSize + 50;
stream.seek(seekPosition);
b = new byte[recordSize];
bytesRead = stream.read(b);
assertEquals(50, bytesRead);
// compare last 50 bytes written with those read
byte[] tail = Arrays.copyOfRange(randomData, seekPosition, randomData.length);
assertTrue(comparePrefix(tail, b, 50));
}
// Verify that reading a record of data after seeking gives the expected data.
private void verifyReadRandomData(byte[] b, int bytesRead, int seekPosition, int recordSize) {
byte[] originalRecordData =
Arrays.copyOfRange(randomData, seekPosition, seekPosition + recordSize + 1);
assertEquals(recordSize, bytesRead);
assertTrue(comparePrefix(originalRecordData, b, recordSize));
}
// Test many small flushed writes interspersed with periodic hflush calls.
// For manual testing, increase NUM_WRITES to a large number.
// The goal for a long-running manual test is to make sure that it finishes
// and the close() call does not time out. It also facilitates debugging into
// hflush/hsync.
@Test
public void testManySmallWritesWithHFlush() throws IOException {
writeAndReadOneFile(50, 100, 20);
}
/**
   * Write a total of numWrites * recordLength bytes to a file, read it back,
* and check to make sure what was read is the same as what was written.
* The syncInterval is the number of writes after which to call hflush to
* force the data to storage.
*/
private void writeAndReadOneFile(int numWrites, int recordLength, int syncInterval) throws IOException {
final int NUM_WRITES = numWrites;
final int RECORD_LENGTH = recordLength;
final int SYNC_INTERVAL = syncInterval;
// A lower bound on the minimum time we think it will take to do
// a write to Azure storage.
final long MINIMUM_EXPECTED_TIME = 20;
LOG.info("Writing " + NUM_WRITES * RECORD_LENGTH + " bytes to " + PATH.getName());
FSDataOutputStream output = fs.create(PATH);
int writesSinceHFlush = 0;
try {
// Do a flush and hflush to exercise case for empty write queue in PageBlobOutputStream,
// to test concurrent execution gates.
output.flush();
output.hflush();
for (int i = 0; i < NUM_WRITES; i++) {
output.write(randomData, i * RECORD_LENGTH, RECORD_LENGTH);
writesSinceHFlush++;
output.flush();
if ((i % SYNC_INTERVAL) == 0) {
long start = Time.monotonicNow();
output.hflush();
writesSinceHFlush = 0;
long end = Time.monotonicNow();
// A true, round-trip synchronous flush to Azure must take
// a significant amount of time or we are not syncing to storage correctly.
LOG.debug("hflush duration = " + (end - start) + " msec.");
assertTrue(String.format(
"hflush duration of %d, less than minimum expected of %d",
end - start, MINIMUM_EXPECTED_TIME),
end - start >= MINIMUM_EXPECTED_TIME);
}
}
} finally {
long start = Time.monotonicNow();
output.close();
long end = Time.monotonicNow();
LOG.debug("close duration = " + (end - start) + " msec.");
if (writesSinceHFlush > 0) {
assertTrue(String.format(
"close duration with >= 1 pending write is %d, less than minimum expected of %d",
end - start, MINIMUM_EXPECTED_TIME),
end - start >= MINIMUM_EXPECTED_TIME);
}
}
// Read the data back and check it.
FSDataInputStream stream = fs.open(PATH);
int SIZE = NUM_WRITES * RECORD_LENGTH;
byte[] b = new byte[SIZE];
try {
stream.seek(0);
stream.read(b, 0, SIZE);
verifyReadRandomData(b, SIZE, 0, SIZE);
} finally {
stream.close();
}
// delete the file
fs.delete(PATH, false);
}
// Test writing to a large file repeatedly as a stress test.
// Set the repetitions to a larger number for manual testing
// for a longer stress run.
@Test
public void testLargeFileStress() throws IOException {
int numWrites = 32;
int recordSize = 1024 * 1024;
int syncInterval = 10;
int repetitions = 1;
for (int i = 0; i < repetitions; i++) {
writeAndReadOneFile(numWrites, recordSize, syncInterval);
}
}
// Write to a file repeatedly to verify that it extends.
// The page blob file should start out at 128MB and finish at 256MB.
@Test(timeout=300000)
public void testFileSizeExtension() throws IOException {
final int writeSize = 1024 * 1024;
final int numWrites = 129;
final byte dataByte = 5;
byte[] data = new byte[writeSize];
Arrays.fill(data, dataByte);
FSDataOutputStream output = fs.create(PATH);
try {
for (int i = 0; i < numWrites; i++) {
output.write(data);
output.hflush();
LOG.debug("total writes = " + (i + 1));
}
} finally {
output.close();
}
// Show that we wrote more than the default page blob file size.
assertTrue(numWrites * writeSize > PageBlobOutputStream.PAGE_BLOB_MIN_SIZE);
// Verify we can list the new size. That will prove we expanded the file.
FileStatus[] status = fs.listStatus(PATH);
assertTrue(status[0].getLen() == numWrites * writeSize);
LOG.debug("Total bytes written to " + PATH + " = " + status[0].getLen());
fs.delete(PATH, false);
}
}
| 12,809 | 34 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
/**
* Tests for the upload, buffering and flush logic in WASB.
*/
public class TestNativeAzureFileSystemUploadLogic {
private AzureBlobStorageTestAccount testAccount;
// Just an arbitrary number so that the values I write have a predictable
// pattern: 0, 1, 2, .. , 45, 46, 0, 1, 2, ...
static final int byteValuePeriod = 47;
@Before
public void setUp() throws Exception {
testAccount = AzureBlobStorageTestAccount.createMock();
}
@After
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
}
}
/**
* Various scenarios to test in how often we flush data while uploading.
*/
private enum FlushFrequencyVariation {
/**
* Flush before even a single in-memory buffer is full.
*/
BeforeSingleBufferFull,
/**
* Flush after a single in-memory buffer is full.
*/
AfterSingleBufferFull,
/**
* Flush after all the in-memory buffers got full and were
* automatically flushed to the backing store.
*/
AfterAllRingBufferFull,
}
/**
* Tests that we upload consistently if we flush after every little
* bit of data.
*/
@Test
@Ignore /* flush() no longer does anything. @@TODO: implement a force-flush and reinstate this test */
public void testConsistencyAfterSmallFlushes() throws Exception {
testConsistencyAfterManyFlushes(FlushFrequencyVariation.BeforeSingleBufferFull);
}
/**
* Tests that we upload consistently if we flush after every medium-sized
* bit of data.
*/
@Test
@Ignore /* flush() no longer does anything. @@TODO: implement a force-flush and reinstate this test */
public void testConsistencyAfterMediumFlushes() throws Exception {
testConsistencyAfterManyFlushes(FlushFrequencyVariation.AfterSingleBufferFull);
}
/**
* Tests that we upload consistently if we flush after every large chunk
* of data.
*/
@Test
@Ignore /* flush() no longer does anything. @@TODO: implement a force-flush and reinstate this test */
public void testConsistencyAfterLargeFlushes() throws Exception {
testConsistencyAfterManyFlushes(FlushFrequencyVariation.AfterAllRingBufferFull);
}
/**
* Makes sure the data in the given input is what I'd expect.
* @param inStream The input stream.
* @param expectedSize The expected size of the data in there.
*/
private void assertDataInStream(InputStream inStream, int expectedSize)
throws Exception {
int byteRead;
int countBytes = 0;
while ((byteRead = inStream.read()) != -1) {
assertEquals(countBytes % byteValuePeriod, byteRead);
countBytes++;
}
assertEquals(expectedSize, countBytes);
}
/**
* Checks that the data in the given file is what I'd expect.
* @param file The file to check.
* @param expectedSize The expected size of the data in there.
*/
private void assertDataInFile(Path file, int expectedSize) throws Exception {
InputStream inStream = testAccount.getFileSystem().open(file);
assertDataInStream(inStream, expectedSize);
inStream.close();
}
/**
* Checks that the data in the current temporary upload blob
* is what I'd expect.
* @param expectedSize The expected size of the data in there.
*/
private void assertDataInTempBlob(int expectedSize) throws Exception {
// Look for the temporary upload blob in the backing store.
InMemoryBlockBlobStore backingStore =
testAccount.getMockStorage().getBackingStore();
String tempKey = null;
for (String key : backingStore.getKeys()) {
if (key.contains(NativeAzureFileSystem.AZURE_TEMP_FOLDER)) {
// Assume this is the one we're looking for.
tempKey = key;
break;
}
}
assertNotNull(tempKey);
InputStream inStream = new ByteArrayInputStream(backingStore.getContent(tempKey));
assertDataInStream(inStream, expectedSize);
inStream.close();
}
/**
* Tests the given scenario for uploading a file while flushing
* periodically and making sure the data is always consistent
* with what I'd expect.
* @param variation The variation/scenario to test.
*/
private void testConsistencyAfterManyFlushes(FlushFrequencyVariation variation)
throws Exception {
Path uploadedFile = new Path("/uploadedFile");
OutputStream outStream = testAccount.getFileSystem().create(uploadedFile);
final int totalSize = 9123;
int flushPeriod;
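    // The flush periods below are chosen relative to the in-memory buffer
    // size so that each variation exercises the buffering state its name
    // describes.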
switch (variation) {
case BeforeSingleBufferFull: flushPeriod = 300; break;
case AfterSingleBufferFull: flushPeriod = 600; break;
case AfterAllRingBufferFull: flushPeriod = 1600; break;
default:
throw new IllegalArgumentException("Unknown variation: " + variation);
}
for (int i = 0; i < totalSize; i++) {
outStream.write(i % byteValuePeriod);
if ((i + 1) % flushPeriod == 0) {
outStream.flush();
assertDataInTempBlob(i + 1);
}
}
outStream.close();
assertDataInFile(uploadedFile, totalSize);
}
}
| 6,224 | 32.28877 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.reflect.Method;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.TimeZone;
import org.apache.commons.httpclient.URIException;
import org.apache.commons.httpclient.util.URIUtil;
import org.apache.commons.lang.NotImplementedException;
import com.microsoft.azure.storage.CloudStorageAccount;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.RetryPolicyFactory;
import com.microsoft.azure.storage.StorageCredentials;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.StorageUri;
import com.microsoft.azure.storage.blob.BlobListingDetails;
import com.microsoft.azure.storage.blob.BlobProperties;
import com.microsoft.azure.storage.blob.BlobRequestOptions;
import com.microsoft.azure.storage.blob.CloudBlob;
import com.microsoft.azure.storage.blob.CloudBlobContainer;
import com.microsoft.azure.storage.blob.CloudBlobDirectory;
import com.microsoft.azure.storage.blob.CopyState;
import com.microsoft.azure.storage.blob.ListBlobItem;
import com.microsoft.azure.storage.blob.PageRange;
import javax.ws.rs.core.UriBuilder;
import javax.ws.rs.core.UriBuilderException;
/**
* A mock implementation of the Azure Storage interaction layer for unit tests.
* Just does in-memory storage.
*/
public class MockStorageInterface extends StorageInterface {
private InMemoryBlockBlobStore backingStore;
private final ArrayList<PreExistingContainer> preExistingContainers =
new ArrayList<MockStorageInterface.PreExistingContainer>();
private String baseUriString;
public InMemoryBlockBlobStore getBackingStore() {
return backingStore;
}
/**
* Mocks the situation where a container already exists before WASB comes in,
* i.e. the situation where a user creates a container then mounts WASB on the
* pre-existing container.
*
* @param uri
* The URI of the container.
* @param metadata
* The metadata on the container.
*/
public void addPreExistingContainer(String uri,
HashMap<String, String> metadata) {
preExistingContainers.add(new PreExistingContainer(uri, metadata));
}
@Override
public void setRetryPolicyFactory(final RetryPolicyFactory retryPolicyFactory) {
}
@Override
public void setTimeoutInMs(int timeoutInMs) {
}
@Override
public void createBlobClient(CloudStorageAccount account) {
backingStore = new InMemoryBlockBlobStore();
}
@Override
public void createBlobClient(URI baseUri) {
backingStore = new InMemoryBlockBlobStore();
}
@Override
public void createBlobClient(URI baseUri, StorageCredentials credentials) {
this.baseUriString = baseUri.toString();
backingStore = new InMemoryBlockBlobStore();
}
@Override
public StorageCredentials getCredentials() {
// Not implemented for mock interface.
return null;
}
/**
* Utility function used to convert a given URI to a decoded string
* representation sent to the backing store. URIs coming as input
* to this class will be encoded by the URI class, and we want
* the underlying storage to store keys in their original UTF-8 form.
*/
private static String convertUriToDecodedString(URI uri) {
try {
String result = URIUtil.decode(uri.toString());
return result;
} catch (URIException e) {
throw new AssertionError("Failed to decode URI: " + uri.toString());
}
}
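  /**
   * Inverse of the above: encodes a raw blob key (which may contain characters
   * that are illegal in a URI) back into its encoded URI form.
   */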
private static URI convertKeyToEncodedUri(String key) {
try {
String encodedKey = URIUtil.encodePath(key);
URI uri = new URI(encodedKey);
return uri;
} catch (URISyntaxException e) {
throw new AssertionError("Failed to encode key: " + key);
} catch (URIException e) {
throw new AssertionError("Failed to encode key: " + key);
}
}
@Override
public CloudBlobContainerWrapper getContainerReference(String name)
throws URISyntaxException, StorageException {
String fullUri;
try {
fullUri = baseUriString + "/" + URIUtil.encodePath(name);
} catch (URIException e) {
throw new RuntimeException("problem encoding fullUri", e);
}
MockCloudBlobContainerWrapper container = new MockCloudBlobContainerWrapper(
fullUri, name);
// Check if we have a pre-existing container with that name, and prime
// the wrapper with that knowledge if it's found.
for (PreExistingContainer existing : preExistingContainers) {
if (fullUri.equalsIgnoreCase(existing.containerUri)) {
// We have a pre-existing container. Mark the wrapper as created and
// make sure we use the metadata for it.
container.created = true;
backingStore.setContainerMetadata(existing.containerMetadata);
break;
}
}
return container;
}
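  /**
   * In-memory mock of a blob container. Tracks created state and container
   * metadata locally, and hands out mock blob/directory wrappers that all
   * share the same backing store.
   */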
class MockCloudBlobContainerWrapper extends CloudBlobContainerWrapper {
private boolean created = false;
private HashMap<String, String> metadata;
private final String baseUri;
private final String name;
public MockCloudBlobContainerWrapper(String baseUri, String name) {
this.baseUri = baseUri;
this.name = name;
}
@Override
public String getName() {
return name;
}
@Override
public boolean exists(OperationContext opContext) throws StorageException {
return created;
}
@Override
public void create(OperationContext opContext) throws StorageException {
created = true;
backingStore.setContainerMetadata(metadata);
}
@Override
public HashMap<String, String> getMetadata() {
return metadata;
}
@Override
public void setMetadata(HashMap<String, String> metadata) {
this.metadata = metadata;
}
@Override
public void downloadAttributes(OperationContext opContext)
throws StorageException {
metadata = backingStore.getContainerMetadata();
}
@Override
public void uploadMetadata(OperationContext opContext)
throws StorageException {
backingStore.setContainerMetadata(metadata);
}
@Override
public CloudBlobDirectoryWrapper getDirectoryReference(String relativePath)
throws URISyntaxException, StorageException {
return new MockCloudBlobDirectoryWrapper(new URI(fullUriString(
relativePath, true)));
}
@Override
public CloudBlockBlobWrapper getBlockBlobReference(String relativePath)
throws URISyntaxException, StorageException {
return new MockCloudBlockBlobWrapper(new URI(fullUriString(relativePath,
false)), null, 0);
}
@Override
public CloudPageBlobWrapper getPageBlobReference(String blobAddressUri)
throws URISyntaxException, StorageException {
return new MockCloudPageBlobWrapper(new URI(blobAddressUri), null, 0);
}
// helper to create full URIs for directory and blob.
// use withTrailingSlash=true to get a good path for a directory.
private String fullUriString(String relativePath, boolean withTrailingSlash) {
String fullUri;
String baseUri = this.baseUri;
if (!baseUri.endsWith("/")) {
baseUri += "/";
}
if (withTrailingSlash && !relativePath.equals("")
&& !relativePath.endsWith("/")) {
relativePath += "/";
}
try {
fullUri = baseUri + URIUtil.encodePath(relativePath);
} catch (URIException e) {
throw new RuntimeException("problem encoding fullUri", e);
}
return fullUri;
}
}
private static class PreExistingContainer {
final String containerUri;
final HashMap<String, String> containerMetadata;
public PreExistingContainer(String uri, HashMap<String, String> metadata) {
this.containerUri = uri;
this.containerMetadata = metadata;
}
}
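  /**
   * Mock directory wrapper. Directories are purely virtual here: listBlobs
   * simply scans the backing store for keys under the directory's decoded URI
   * prefix.
   */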
class MockCloudBlobDirectoryWrapper extends CloudBlobDirectoryWrapper {
private URI uri;
public MockCloudBlobDirectoryWrapper(URI uri) {
this.uri = uri;
}
@Override
public CloudBlobContainer getContainer() throws URISyntaxException,
StorageException {
return null;
}
@Override
public CloudBlobDirectory getParent() throws URISyntaxException,
StorageException {
return null;
}
@Override
public URI getUri() {
return uri;
}
@Override
public Iterable<ListBlobItem> listBlobs(String prefix,
boolean useFlatBlobListing, EnumSet<BlobListingDetails> listingDetails,
BlobRequestOptions options, OperationContext opContext)
throws URISyntaxException, StorageException {
ArrayList<ListBlobItem> ret = new ArrayList<ListBlobItem>();
URI searchUri = null;
if (prefix == null) {
searchUri = uri;
} else {
try {
searchUri = UriBuilder.fromUri(uri).path(prefix).build();
} catch (UriBuilderException e) {
throw new AssertionError("Failed to encode path: " + prefix);
}
}
String fullPrefix = convertUriToDecodedString(searchUri);
boolean includeMetadata = listingDetails.contains(BlobListingDetails.METADATA);
HashSet<String> addedDirectories = new HashSet<String>();
for (InMemoryBlockBlobStore.ListBlobEntry current : backingStore.listBlobs(
fullPrefix, includeMetadata)) {
int indexOfSlash = current.getKey().indexOf('/', fullPrefix.length());
if (useFlatBlobListing || indexOfSlash < 0) {
if (current.isPageBlob()) {
ret.add(new MockCloudPageBlobWrapper(
convertKeyToEncodedUri(current.getKey()),
current.getMetadata(),
current.getContentLength()));
} else {
ret.add(new MockCloudBlockBlobWrapper(
convertKeyToEncodedUri(current.getKey()),
current.getMetadata(),
current.getContentLength()));
}
} else {
String directoryName = current.getKey().substring(0, indexOfSlash);
if (!addedDirectories.contains(directoryName)) {
            addedDirectories.add(directoryName);
ret.add(new MockCloudBlobDirectoryWrapper(new URI(
directoryName + "/")));
}
}
}
return ret;
}
@Override
public StorageUri getStorageUri() {
throw new NotImplementedException();
}
}
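  /**
   * Common base for the mock block and page blob wrappers. Keeps metadata and
   * simulated blob properties in memory, and reads and writes blob content
   * through the shared backing store keyed by the decoded blob URI.
   */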
abstract class MockCloudBlobWrapper implements CloudBlobWrapper {
protected final URI uri;
protected HashMap<String, String> metadata =
new HashMap<String, String>();
protected BlobProperties properties;
protected MockCloudBlobWrapper(URI uri, HashMap<String, String> metadata,
int length) {
this.uri = uri;
this.metadata = metadata;
this.properties = new BlobProperties();
      this.properties = updateLastModified(this.properties);
      this.properties = updateLength(this.properties, length);
}
    protected BlobProperties updateLastModified(BlobProperties properties) {
      try {
        Method setLastModified = properties.getClass()
            .getDeclaredMethod("setLastModified", Date.class);
        setLastModified.setAccessible(true);
        setLastModified.invoke(this.properties,
            Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTime());
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
      return properties;
    }
    protected BlobProperties updateLength(BlobProperties properties, int length) {
      try {
        Method setLength = properties.getClass()
            .getDeclaredMethod("setLength", long.class);
        setLength.setAccessible(true);
        setLength.invoke(this.properties, length);
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
      return properties;
    }
protected void refreshProperties(boolean getMetadata) {
if (backingStore.exists(convertUriToDecodedString(uri))) {
byte[] content = backingStore.getContent(convertUriToDecodedString(uri));
properties = new BlobProperties();
        this.properties = updateLastModified(this.properties);
        this.properties = updateLength(this.properties, content.length);
if (getMetadata) {
metadata = backingStore.getMetadata(convertUriToDecodedString(uri));
}
}
}
@Override
public CloudBlobContainer getContainer() throws URISyntaxException,
StorageException {
return null;
}
@Override
public CloudBlobDirectory getParent() throws URISyntaxException,
StorageException {
return null;
}
@Override
public URI getUri() {
return uri;
}
@Override
public HashMap<String, String> getMetadata() {
return metadata;
}
@Override
public void setMetadata(HashMap<String, String> metadata) {
this.metadata = metadata;
}
@Override
public void startCopyFromBlob(CloudBlobWrapper sourceBlob, BlobRequestOptions options,
OperationContext opContext) throws StorageException, URISyntaxException {
backingStore.copy(convertUriToDecodedString(sourceBlob.getUri()), convertUriToDecodedString(uri));
//TODO: set the backingStore.properties.CopyState and
// update azureNativeFileSystemStore.waitForCopyToComplete
}
@Override
public CopyState getCopyState() {
return this.properties.getCopyState();
}
@Override
public void delete(OperationContext opContext, SelfRenewingLease lease)
throws StorageException {
backingStore.delete(convertUriToDecodedString(uri));
}
@Override
public boolean exists(OperationContext opContext) throws StorageException {
return backingStore.exists(convertUriToDecodedString(uri));
}
@Override
public void downloadAttributes(OperationContext opContext)
throws StorageException {
refreshProperties(true);
}
@Override
public BlobProperties getProperties() {
return properties;
}
@Override
public InputStream openInputStream(BlobRequestOptions options,
OperationContext opContext) throws StorageException {
return new ByteArrayInputStream(
backingStore.getContent(convertUriToDecodedString(uri)));
}
@Override
public void uploadMetadata(OperationContext opContext)
throws StorageException {
backingStore.setMetadata(convertUriToDecodedString(uri), metadata);
}
@Override
public void downloadRange(long offset, long length, OutputStream os,
BlobRequestOptions options, OperationContext opContext)
throws StorageException {
throw new NotImplementedException();
}
}
class MockCloudBlockBlobWrapper extends MockCloudBlobWrapper
implements CloudBlockBlobWrapper {
public MockCloudBlockBlobWrapper(URI uri, HashMap<String, String> metadata,
int length) {
super(uri, metadata, length);
}
@Override
public OutputStream openOutputStream(BlobRequestOptions options,
OperationContext opContext) throws StorageException {
return backingStore.uploadBlockBlob(convertUriToDecodedString(uri),
metadata);
}
@Override
public void setStreamMinimumReadSizeInBytes(int minimumReadSizeBytes) {
}
@Override
public void setWriteBlockSizeInBytes(int writeBlockSizeBytes) {
}
@Override
public StorageUri getStorageUri() {
return null;
}
@Override
public void uploadProperties(OperationContext context, SelfRenewingLease lease) {
}
@Override
public SelfRenewingLease acquireLease() {
return null;
}
@Override
public CloudBlob getBlob() {
return null;
}
}
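  /**
   * Mock page blob wrapper. Only the property and metadata plumbing is mocked;
   * the page-level operations throw NotImplementedException because the
   * in-memory backing store only models block blob content.
   */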
class MockCloudPageBlobWrapper extends MockCloudBlobWrapper
implements CloudPageBlobWrapper {
public MockCloudPageBlobWrapper(URI uri, HashMap<String, String> metadata,
int length) {
super(uri, metadata, length);
}
@Override
public void create(long length, BlobRequestOptions options,
OperationContext opContext) throws StorageException {
throw new NotImplementedException();
}
@Override
public void uploadPages(InputStream sourceStream, long offset, long length,
BlobRequestOptions options, OperationContext opContext)
throws StorageException, IOException {
throw new NotImplementedException();
}
@Override
public ArrayList<PageRange> downloadPageRanges(BlobRequestOptions options,
OperationContext opContext) throws StorageException {
throw new NotImplementedException();
}
@Override
public void setStreamMinimumReadSizeInBytes(int minimumReadSize) {
}
@Override
public void setWriteBlockSizeInBytes(int writeBlockSizeInBytes) {
}
@Override
public StorageUri getStorageUri() {
throw new NotImplementedException();
}
@Override
public void uploadProperties(OperationContext opContext,
SelfRenewingLease lease)
throws StorageException {
}
@Override
public SelfRenewingLease acquireLease() {
return null;
}
@Override
public CloudBlob getBlob() {
return null;
}
}
}
| 18,332 | 30.338462 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.net.URLDecoder;
import java.util.HashMap;
import java.util.Iterator;
import java.util.concurrent.ConcurrentLinkedQueue;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
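/**
 * Tests concurrency-related behavior of WASB against the in-memory mock store:
 * link blobs for in-progress uploads, hiding of temporary blobs in listings,
 * and many threads operating on the same file system instance.
 */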
public class TestNativeAzureFileSystemConcurrency {
private AzureBlobStorageTestAccount testAccount;
private FileSystem fs;
private InMemoryBlockBlobStore backingStore;
@Before
public void setUp() throws Exception {
testAccount = AzureBlobStorageTestAccount.createMock();
fs = testAccount.getFileSystem();
backingStore = testAccount.getMockStorage().getBackingStore();
}
@After
public void tearDown() throws Exception {
testAccount.cleanup();
fs = null;
backingStore = null;
}
@Test
public void testLinkBlobs() throws Exception {
Path filePath = new Path("/inProgress");
FSDataOutputStream outputStream = fs.create(filePath);
// Since the stream is still open, we should see an empty link
// blob in the backing store linking to the temporary file.
HashMap<String, String> metadata = backingStore
.getMetadata(AzureBlobStorageTestAccount.toMockUri(filePath));
assertNotNull(metadata);
String linkValue = metadata.get(AzureNativeFileSystemStore.LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY);
linkValue = URLDecoder.decode(linkValue, "UTF-8");
assertNotNull(linkValue);
assertTrue(backingStore.exists(AzureBlobStorageTestAccount
.toMockUri(linkValue)));
// Also, WASB should say the file exists now even before we close the
// stream.
assertTrue(fs.exists(filePath));
outputStream.close();
// Now there should be no link metadata on the final file.
metadata = backingStore.getMetadata(AzureBlobStorageTestAccount
.toMockUri(filePath));
assertNull(metadata
.get(AzureNativeFileSystemStore.LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY));
}
private static String toString(FileStatus[] list) {
String[] asStrings = new String[list.length];
for (int i = 0; i < list.length; i++) {
asStrings[i] = list[i].getPath().toString();
}
return StringUtils.join(",", asStrings);
}
/**
* Test to make sure that we don't expose the temporary upload folder when
* listing at the root.
*/
@Test
public void testNoTempBlobsVisible() throws Exception {
Path filePath = new Path("/inProgress");
FSDataOutputStream outputStream = fs.create(filePath);
// Make sure I can't see the temporary blob if I ask for a listing
FileStatus[] listOfRoot = fs.listStatus(new Path("/"));
assertEquals("Expected one file listed, instead got: "
+ toString(listOfRoot), 1, listOfRoot.length);
assertEquals(fs.makeQualified(filePath), listOfRoot[0].getPath());
outputStream.close();
}
/**
* Converts a collection of exceptions to a collection of strings by getting
* the stack trace on every exception.
*/
private static Iterable<String> selectToString(
final Iterable<Throwable> collection) {
return new Iterable<String>() {
@Override
public Iterator<String> iterator() {
final Iterator<Throwable> exceptionIterator = collection.iterator();
return new Iterator<String>() {
@Override
public boolean hasNext() {
return exceptionIterator.hasNext();
}
@Override
public String next() {
StringWriter stringWriter = new StringWriter();
PrintWriter printWriter = new PrintWriter(stringWriter);
exceptionIterator.next().printStackTrace(printWriter);
printWriter.close();
return stringWriter.toString();
}
@Override
public void remove() {
exceptionIterator.remove();
}
};
}
};
}
/**
   * Tests starting multiple threads that all do various file system
   * operations against the same FS.
*/
@Test
public void testMultiThreadedOperation() throws Exception {
for (int iter = 0; iter < 10; iter++) {
final int numThreads = 20;
Thread[] threads = new Thread[numThreads];
final ConcurrentLinkedQueue<Throwable> exceptionsEncountered = new ConcurrentLinkedQueue<Throwable>();
for (int i = 0; i < numThreads; i++) {
final Path threadLocalFile = new Path("/myFile" + i);
threads[i] = new Thread(new Runnable() {
@Override
public void run() {
try {
assertTrue(!fs.exists(threadLocalFile));
OutputStream output = fs.create(threadLocalFile);
output.write(5);
output.close();
assertTrue(fs.exists(threadLocalFile));
assertTrue(fs.listStatus(new Path("/")).length > 0);
} catch (Throwable ex) {
exceptionsEncountered.add(ex);
}
}
});
}
for (Thread t : threads) {
t.start();
}
for (Thread t : threads) {
t.join();
}
assertTrue(
"Encountered exceptions: "
+ StringUtils.join("\r\n", selectToString(exceptionsEncountered)),
exceptionsEncountered.isEmpty());
tearDown();
setUp();
}
}
}
| 6,554 | 33.867021 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureConcurrentOutOfBandIo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeNotNull;
import java.io.*;
import java.util.Arrays;
import org.apache.hadoop.fs.azure.AzureException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
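/**
 * Tests reading a blob while a separate thread keeps re-uploading the same
 * blob out of band, verifying that reads still return whole, consistent
 * blocks.
 */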
public class TestAzureConcurrentOutOfBandIo {
// Class constants.
static final int DOWNLOAD_BLOCK_SIZE = 8 * 1024 * 1024;
static final int UPLOAD_BLOCK_SIZE = 4 * 1024 * 1024;
static final int BLOB_SIZE = 32 * 1024 * 1024;
// Number of blocks to be written before flush.
private static final int NUMBER_OF_BLOCKS = 2;
private AzureBlobStorageTestAccount testAccount;
// Overridden TestCase methods.
@Before
public void setUp() throws Exception {
testAccount = AzureBlobStorageTestAccount.createOutOfBandStore(
UPLOAD_BLOCK_SIZE, DOWNLOAD_BLOCK_SIZE);
assumeNotNull(testAccount);
}
@After
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
}
}
class DataBlockWriter implements Runnable {
Thread runner;
AzureBlobStorageTestAccount writerStorageAccount;
String key;
boolean done = false;
/**
* Constructor captures the test account.
*
* @param testAccount
*/
public DataBlockWriter(AzureBlobStorageTestAccount testAccount, String key) {
writerStorageAccount = testAccount;
this.key = key;
}
/**
* Start writing blocks to Azure storage.
*/
public void startWriting() {
runner = new Thread(this); // Create the block writer thread.
runner.start(); // Start the block writer thread.
}
/**
* Stop writing blocks to Azure storage.
*/
public void stopWriting() {
done = true;
}
/**
     * Implementation of the Runnable interface. The run method is a tight loop
     * that repeatedly re-uploads the blob as NUMBER_OF_BLOCKS 4 MB blocks.
*/
public void run() {
byte[] dataBlockWrite = new byte[UPLOAD_BLOCK_SIZE];
OutputStream outputStream = null;
try {
for (int i = 0; !done; i++) {
// Write two 4 MB blocks to the blob.
//
outputStream = writerStorageAccount.getStore().storefile(
key,
new PermissionStatus("", "", FsPermission.getDefault()));
Arrays.fill(dataBlockWrite, (byte) (i % 256));
for (int j = 0; j < NUMBER_OF_BLOCKS; j++) {
outputStream.write(dataBlockWrite);
}
outputStream.flush();
outputStream.close();
}
} catch (AzureException e) {
System.out
.println("DatablockWriter thread encountered a storage exception."
+ e.getMessage());
} catch (IOException e) {
System.out
.println("DatablockWriter thread encountered an I/O exception."
+ e.getMessage());
}
}
}
@Test
public void testReadOOBWrites() throws Exception {
byte[] dataBlockWrite = new byte[UPLOAD_BLOCK_SIZE];
byte[] dataBlockRead = new byte[UPLOAD_BLOCK_SIZE];
    // Write to the blob first to make sure it exists.
    //
    // Upload NUMBER_OF_BLOCKS 4 MB blocks so there is data in the blob before
    // reading; this eliminates the race between the reader and writer threads.
OutputStream outputStream = testAccount.getStore().storefile(
"WASB_String.txt",
new PermissionStatus("", "", FsPermission.getDefault()));
Arrays.fill(dataBlockWrite, (byte) 255);
for (int i = 0; i < NUMBER_OF_BLOCKS; i++) {
outputStream.write(dataBlockWrite);
}
outputStream.flush();
outputStream.close();
// Start writing blocks to Azure store using the DataBlockWriter thread.
DataBlockWriter writeBlockTask = new DataBlockWriter(testAccount,
"WASB_String.txt");
writeBlockTask.startWriting();
int count = 0;
DataInputStream inputStream = null;
for (int i = 0; i < 5; i++) {
try {
inputStream = testAccount.getStore().retrieve("WASB_String.txt");
count = 0;
int c = 0;
while (c >= 0) {
c = inputStream.read(dataBlockRead, 0, UPLOAD_BLOCK_SIZE);
if (c < 0) {
break;
}
// Counting the number of bytes.
count += c;
}
} catch (IOException e) {
System.out.println(e.getCause().toString());
e.printStackTrace();
fail();
}
// Close the stream.
if (null != inputStream){
inputStream.close();
}
}
// Stop writing blocks.
writeBlockTask.stopWriting();
// Validate that a block was read.
assertEquals(NUMBER_OF_BLOCKS * UPLOAD_BLOCK_SIZE, count);
}
}
| 5,714 | 28.611399 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobMetadata.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import java.io.Closeable;
import java.io.IOException;
import java.net.URI;
import java.util.HashMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Tests that we put the correct metadata on blobs created through WASB.
*/
public class TestBlobMetadata {
private AzureBlobStorageTestAccount testAccount;
private FileSystem fs;
private InMemoryBlockBlobStore backingStore;
@Before
public void setUp() throws Exception {
testAccount = AzureBlobStorageTestAccount.createMock();
fs = testAccount.getFileSystem();
backingStore = testAccount.getMockStorage().getBackingStore();
}
@After
public void tearDown() throws Exception {
testAccount.cleanup();
fs = null;
backingStore = null;
}
private static String getExpectedOwner() throws Exception {
return UserGroupInformation.getCurrentUser().getShortUserName();
}
private static String getExpectedPermissionString(String permissionString)
throws Exception {
return String.format(
"{\"owner\":\"%s\",\"group\":\"%s\",\"permissions\":\"%s\"}",
getExpectedOwner(),
NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
permissionString);
}
/**
* Tests that WASB stamped the version in the container metadata.
*/
@Test
public void testContainerVersionMetadata() throws Exception {
// Do a write operation to trigger version stamp
fs.createNewFile(new Path("/foo"));
HashMap<String, String> containerMetadata =
backingStore.getContainerMetadata();
assertNotNull(containerMetadata);
assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION,
containerMetadata.get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
}
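  /**
   * Helper that wires a NativeAzureFileSystem to a mock storage layer seeded
   * with a pre-existing container (optionally carrying metadata), so tests can
   * observe how WASB stamps version metadata on that container.
   */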
private static final class FsWithPreExistingContainer implements Closeable {
private final MockStorageInterface mockStorage;
private final NativeAzureFileSystem fs;
private FsWithPreExistingContainer(MockStorageInterface mockStorage,
NativeAzureFileSystem fs) {
this.mockStorage = mockStorage;
this.fs = fs;
}
public NativeAzureFileSystem getFs() {
return fs;
}
public HashMap<String, String> getContainerMetadata() {
return mockStorage.getBackingStore().getContainerMetadata();
}
public static FsWithPreExistingContainer create() throws Exception {
return create(null);
}
public static FsWithPreExistingContainer create(
HashMap<String, String> containerMetadata) throws Exception {
AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
MockStorageInterface mockStorage = new MockStorageInterface();
store.setAzureStorageInteractionLayer(mockStorage);
NativeAzureFileSystem fs = new NativeAzureFileSystem(store);
Configuration conf = new Configuration();
AzureBlobStorageTestAccount.setMockAccountKey(conf);
mockStorage.addPreExistingContainer(
AzureBlobStorageTestAccount.getMockContainerUri(), containerMetadata);
fs.initialize(new URI(AzureBlobStorageTestAccount.MOCK_WASB_URI), conf);
return new FsWithPreExistingContainer(mockStorage, fs);
}
@Override
public void close() throws IOException {
fs.close();
}
}
/**
* Tests that WASB stamped the version in the container metadata if it does a
* write operation to a pre-existing container.
*/
@Test
public void testPreExistingContainerVersionMetadata() throws Exception {
// Create a mock storage with a pre-existing container that has no
// WASB version metadata on it.
FsWithPreExistingContainer fsWithContainer = FsWithPreExistingContainer
.create();
// Now, do some read operations (should touch the metadata)
assertFalse(fsWithContainer.getFs().exists(new Path("/IDontExist")));
assertEquals(0, fsWithContainer.getFs().listStatus(new Path("/")).length);
// Check that no container metadata exists yet
assertNull(fsWithContainer.getContainerMetadata());
// Now do a write operation - should stamp the version
fsWithContainer.getFs().mkdirs(new Path("/dir"));
// Check that now we have the version stamp
assertNotNull(fsWithContainer.getContainerMetadata());
assertEquals(
AzureNativeFileSystemStore.CURRENT_WASB_VERSION,
fsWithContainer.getContainerMetadata().get(
AzureNativeFileSystemStore.VERSION_METADATA_KEY));
fsWithContainer.close();
}
/**
* Tests that WASB works well with an older version container with ASV-era
* version and metadata.
*/
@Test
public void testFirstContainerVersionMetadata() throws Exception {
// Create a mock storage with a pre-existing container that has
// ASV version metadata on it.
HashMap<String, String> containerMetadata = new HashMap<String, String>();
containerMetadata.put(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY,
AzureNativeFileSystemStore.FIRST_WASB_VERSION);
FsWithPreExistingContainer fsWithContainer = FsWithPreExistingContainer
.create(containerMetadata);
// Now, do some read operations (should touch the metadata)
assertFalse(fsWithContainer.getFs().exists(new Path("/IDontExist")));
assertEquals(0, fsWithContainer.getFs().listStatus(new Path("/")).length);
    // Check that the old-style ASV version stamp is still present and that no
    // new-style version stamp has been added yet
assertEquals(
AzureNativeFileSystemStore.FIRST_WASB_VERSION,
fsWithContainer.getContainerMetadata().get(
AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY));
assertNull(fsWithContainer.getContainerMetadata().get(
AzureNativeFileSystemStore.VERSION_METADATA_KEY));
// Now do a write operation - should stamp the version
fsWithContainer.getFs().mkdirs(new Path("/dir"));
// Check that now we have the version stamp
assertEquals(
AzureNativeFileSystemStore.CURRENT_WASB_VERSION,
fsWithContainer.getContainerMetadata().get(
AzureNativeFileSystemStore.VERSION_METADATA_KEY));
assertNull(fsWithContainer.getContainerMetadata().get(
AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY));
fsWithContainer.close();
}
@SuppressWarnings("deprecation")
@Test
public void testPermissionMetadata() throws Exception {
FsPermission justMe = new FsPermission(FsAction.READ_WRITE, FsAction.NONE,
FsAction.NONE);
Path selfishFile = new Path("/noOneElse");
fs.create(selfishFile, justMe, true, 4096, fs.getDefaultReplication(),
fs.getDefaultBlockSize(), null).close();
HashMap<String, String> metadata = backingStore
.getMetadata(AzureBlobStorageTestAccount.toMockUri(selfishFile));
assertNotNull(metadata);
String storedPermission = metadata.get("hdi_permission");
assertEquals(getExpectedPermissionString("rw-------"), storedPermission);
FileStatus retrievedStatus = fs.getFileStatus(selfishFile);
assertNotNull(retrievedStatus);
assertEquals(justMe, retrievedStatus.getPermission());
assertEquals(getExpectedOwner(), retrievedStatus.getOwner());
assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
retrievedStatus.getGroup());
}
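  // For reference (illustrative, not asserted directly here): the hdi_permission value is
  // assumed to be a small JSON document along the lines of
  //   {"owner":"<user>","group":"supergroup","permissions":"rw-------"}
  // which is the shape getExpectedPermissionString(...) is presumed to build for comparison.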
/**
* Tests that WASB understands the old-style ASV metadata and changes it when
* it gets the chance.
*/
@Test
public void testOldPermissionMetadata() throws Exception {
Path selfishFile = new Path("/noOneElse");
HashMap<String, String> metadata =
new HashMap<String, String>();
metadata.put("asv_permission",
getExpectedPermissionString("rw-------"));
backingStore.setContent(
AzureBlobStorageTestAccount.toMockUri(selfishFile),
new byte[] { },
metadata, false, 0);
FsPermission justMe = new FsPermission(
FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
FileStatus retrievedStatus = fs.getFileStatus(selfishFile);
assertNotNull(retrievedStatus);
assertEquals(justMe, retrievedStatus.getPermission());
assertEquals(getExpectedOwner(), retrievedStatus.getOwner());
assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
retrievedStatus.getGroup());
FsPermission meAndYou = new FsPermission(
FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.NONE);
fs.setPermission(selfishFile, meAndYou);
metadata =
backingStore.getMetadata(
AzureBlobStorageTestAccount.toMockUri(selfishFile));
assertNotNull(metadata);
String storedPermission = metadata.get("hdi_permission");
assertEquals(getExpectedPermissionString("rw-rw----"),
storedPermission);
assertNull(metadata.get("asv_permission"));
}
@Test
public void testFolderMetadata() throws Exception {
Path folder = new Path("/folder");
FsPermission justRead = new FsPermission(FsAction.READ, FsAction.READ,
FsAction.READ);
fs.mkdirs(folder, justRead);
HashMap<String, String> metadata = backingStore
.getMetadata(AzureBlobStorageTestAccount.toMockUri(folder));
assertNotNull(metadata);
assertEquals("true", metadata.get("hdi_isfolder"));
assertEquals(getExpectedPermissionString("r--r--r--"),
metadata.get("hdi_permission"));
}
}
| 10,531 | 37.578755 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.HashMap;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Tests the scenario where a colon is included in the file/directory name.
*
* NativeAzureFileSystem#create(), #mkdir(), and #rename() disallow the
* creation/rename of files/directories through WASB that have colons in the
* names.
*/
public class TestNativeAzureFileSystemFileNameCheck {
private FileSystem fs = null;
private AzureBlobStorageTestAccount testAccount = null;
private String root = null;
@Before
public void setUp() throws Exception {
testAccount = AzureBlobStorageTestAccount.createMock();
fs = testAccount.getFileSystem();
root = fs.getUri().toString();
}
@After
public void tearDown() throws Exception {
testAccount.cleanup();
root = null;
fs = null;
testAccount = null;
}
@Test
public void testCreate() throws Exception {
// positive test
Path testFile1 = new Path(root + "/testFile1");
assertTrue(fs.createNewFile(testFile1));
// negative test
Path testFile2 = new Path(root + "/testFile2:2");
try {
fs.createNewFile(testFile2);
fail("Should've thrown.");
} catch (IOException e) { // ignore
}
}
@Test
public void testRename() throws Exception {
// positive test
Path testFile1 = new Path(root + "/testFile1");
assertTrue(fs.createNewFile(testFile1));
Path testFile2 = new Path(root + "/testFile2");
fs.rename(testFile1, testFile2);
assertTrue(!fs.exists(testFile1) && fs.exists(testFile2));
// negative test
Path testFile3 = new Path(root + "/testFile3:3");
try {
fs.rename(testFile2, testFile3);
fail("Should've thrown.");
} catch (IOException e) { // ignore
}
assertTrue(fs.exists(testFile2));
}
@Test
public void testMkdirs() throws Exception {
// positive test
Path testFolder1 = new Path(root + "/testFolder1");
assertTrue(fs.mkdirs(testFolder1));
// negative test
Path testFolder2 = new Path(root + "/testFolder2:2");
try {
assertTrue(fs.mkdirs(testFolder2));
fail("Should've thrown.");
} catch (IOException e) { // ignore
}
}
@Test
public void testWasbFsck() throws Exception {
// positive test
Path testFolder1 = new Path(root + "/testFolder1");
assertTrue(fs.mkdirs(testFolder1));
Path testFolder2 = new Path(testFolder1, "testFolder2");
assertTrue(fs.mkdirs(testFolder2));
Path testFolder3 = new Path(testFolder1, "testFolder3");
assertTrue(fs.mkdirs(testFolder3));
Path testFile1 = new Path(testFolder2, "testFile1");
assertTrue(fs.createNewFile(testFile1));
Path testFile2 = new Path(testFolder1, "testFile2");
assertTrue(fs.createNewFile(testFile2));
assertFalse(runWasbFsck(testFolder1));
// negative test
InMemoryBlockBlobStore backingStore
= testAccount.getMockStorage().getBackingStore();
backingStore.setContent(
AzureBlobStorageTestAccount.toMockUri("testFolder1/testFolder2/test2:2"),
new byte[] { 1, 2 },
new HashMap<String, String>(), false, 0);
assertTrue(runWasbFsck(testFolder1));
}
private boolean runWasbFsck(Path p) throws Exception {
WasbFsck fsck = new WasbFsck(fs.getConf());
fsck.setMockFileSystemForTesting(fs);
fsck.run(new String[] { p.toString() });
return fsck.getPathNameWarning();
}
}
| 4,473 | 30.730496 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractPageBlobLive.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.junit.Ignore;
public class TestNativeAzureFileSystemContractPageBlobLive extends
FileSystemContractBaseTest {
private AzureBlobStorageTestAccount testAccount;
private AzureBlobStorageTestAccount createTestAccount()
throws Exception {
Configuration conf = new Configuration();
// Configure the page blob directories key so every file created is a page blob.
conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/");
// Configure the atomic rename directories key so every folder will have
// atomic rename applied.
conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES, "/");
return AzureBlobStorageTestAccount.create(conf);
}
@Override
protected void setUp() throws Exception {
testAccount = createTestAccount();
if (testAccount != null) {
fs = testAccount.getFileSystem();
}
}
@Override
protected void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
fs = null;
}
}
@Override
protected void runTest() throws Throwable {
if (testAccount != null) {
super.runTest();
}
}
/**
* The following tests are failing on Azure and the Azure
* file system code needs to be modified to make them pass.
* A separate work item has been opened for this.
*/
@Ignore
public void testMoveFileUnderParent() throws Throwable {
}
@Ignore
public void testRenameFileToSelf() throws Throwable {
}
@Ignore
public void testRenameChildDirForbidden() throws Exception {
}
@Ignore
public void testMoveDirUnderParent() throws Throwable {
}
@Ignore
public void testRenameDirToSelf() throws Throwable {
}
}
| 2,690 | 28.571429 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeNotNull;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.util.Date;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.CreateOptions;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
import com.microsoft.azure.storage.blob.CloudBlobContainer;
import com.microsoft.azure.storage.blob.CloudBlockBlob;
public class TestWasbUriAndConfiguration {
private static final int FILE_SIZE = 4096;
private static final String PATH_DELIMITER = "/";
protected String accountName;
protected String accountKey;
protected static Configuration conf = null;
private AzureBlobStorageTestAccount testAccount;
@After
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
}
}
private boolean validateIOStreams(Path filePath) throws IOException {
// Capture the file system from the test account.
FileSystem fs = testAccount.getFileSystem();
return validateIOStreams(fs, filePath);
}
private boolean validateIOStreams(FileSystem fs, Path filePath)
throws IOException {
// Create and write a file
OutputStream outputStream = fs.create(filePath);
outputStream.write(new byte[FILE_SIZE]);
outputStream.close();
    // Return true if the count is equivalent to the file size.
return (FILE_SIZE == readInputStream(fs, filePath));
}
private int readInputStream(Path filePath) throws IOException {
// Capture the file system from the test account.
FileSystem fs = testAccount.getFileSystem();
return readInputStream(fs, filePath);
}
private int readInputStream(FileSystem fs, Path filePath) throws IOException {
// Read the file
InputStream inputStream = fs.open(filePath);
int count = 0;
while (inputStream.read() >= 0) {
count++;
}
inputStream.close();
    // Return the number of bytes read from the file.
return count;
}
  // Positive tests to exercise making a connection to the Azure account using
// account key.
@Test
public void testConnectUsingKey() throws Exception {
testAccount = AzureBlobStorageTestAccount.create();
assumeNotNull(testAccount);
// Validate input and output on the connection.
assertTrue(validateIOStreams(new Path("/wasb_scheme")));
}
@Test
public void testConnectUsingSAS() throws Exception {
// Create the test account with SAS credentials.
testAccount = AzureBlobStorageTestAccount.create("",
EnumSet.of(CreateOptions.UseSas, CreateOptions.CreateContainer));
assumeNotNull(testAccount);
// Validate input and output on the connection.
// NOTE: As of 4/15/2013, Azure Storage has a deficiency that prevents the
// full scenario from working (CopyFromBlob doesn't work with SAS), so
// just do a minor check until that is corrected.
assertFalse(testAccount.getFileSystem().exists(new Path("/IDontExist")));
//assertTrue(validateIOStreams(new Path("/sastest.txt")));
}
@Test
public void testConnectUsingSASReadonly() throws Exception {
// Create the test account with SAS credentials.
testAccount = AzureBlobStorageTestAccount.create("", EnumSet.of(
CreateOptions.UseSas, CreateOptions.CreateContainer,
CreateOptions.Readonly));
assumeNotNull(testAccount);
// Create a blob in there
final String blobKey = "blobForReadonly";
CloudBlobContainer container = testAccount.getRealContainer();
CloudBlockBlob blob = container.getBlockBlobReference(blobKey);
ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[] { 1,
2, 3 });
blob.upload(inputStream, 3);
inputStream.close();
// Make sure we can read it from the file system
Path filePath = new Path("/" + blobKey);
FileSystem fs = testAccount.getFileSystem();
assertTrue(fs.exists(filePath));
byte[] obtained = new byte[3];
DataInputStream obtainedInputStream = fs.open(filePath);
obtainedInputStream.readFully(obtained);
obtainedInputStream.close();
assertEquals(3, obtained[2]);
}
@Test
public void testConnectUsingAnonymous() throws Exception {
// Create test account with anonymous credentials
testAccount = AzureBlobStorageTestAccount.createAnonymous("testWasb.txt",
FILE_SIZE);
assumeNotNull(testAccount);
// Read the file from the public folder using anonymous credentials.
assertEquals(FILE_SIZE, readInputStream(new Path("/testWasb.txt")));
}
@Test
public void testConnectToEmulator() throws Exception {
testAccount = AzureBlobStorageTestAccount.createForEmulator();
assumeNotNull(testAccount);
assertTrue(validateIOStreams(new Path("/testFile")));
}
/**
* Tests that we can connect to fully qualified accounts outside of
* blob.core.windows.net
*/
@Test
public void testConnectToFullyQualifiedAccountMock() throws Exception {
Configuration conf = new Configuration();
AzureBlobStorageTestAccount.setMockAccountKey(conf,
"mockAccount.mock.authority.net");
AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
MockStorageInterface mockStorage = new MockStorageInterface();
store.setAzureStorageInteractionLayer(mockStorage);
NativeAzureFileSystem fs = new NativeAzureFileSystem(store);
fs.initialize(
new URI("wasb://[email protected]"), conf);
fs.createNewFile(new Path("/x"));
assertTrue(mockStorage.getBackingStore().exists(
"http://mockAccount.mock.authority.net/mockContainer/x"));
fs.close();
}
public void testConnectToRoot() throws Exception {
// Set up blob names.
final String blobPrefix = String.format("wasbtests-%s-%tQ-blob",
System.getProperty("user.name"), new Date());
final String inblobName = blobPrefix + "_In" + ".txt";
final String outblobName = blobPrefix + "_Out" + ".txt";
// Create test account with default root access.
testAccount = AzureBlobStorageTestAccount.createRoot(inblobName, FILE_SIZE);
assumeNotNull(testAccount);
// Read the file from the default container.
assertEquals(FILE_SIZE, readInputStream(new Path(PATH_DELIMITER
+ inblobName)));
try {
// Capture file system.
FileSystem fs = testAccount.getFileSystem();
// Create output path and open an output stream to the root folder.
Path outputPath = new Path(PATH_DELIMITER + outblobName);
OutputStream outputStream = fs.create(outputPath);
fail("Expected an AzureException when writing to root folder.");
outputStream.write(new byte[FILE_SIZE]);
outputStream.close();
} catch (AzureException e) {
assertTrue(true);
} catch (Exception e) {
String errMsg = String.format(
"Expected AzureException but got %s instead.", e);
assertTrue(errMsg, false);
}
}
// Positive tests to exercise throttling I/O path. Connections are made to an
// Azure account using account key.
//
public void testConnectWithThrottling() throws Exception {
testAccount = AzureBlobStorageTestAccount.createThrottled();
// Validate input and output on the connection.
assertTrue(validateIOStreams(new Path("/wasb_scheme")));
}
/**
* Creates a file and writes a single byte with the given value in it.
*/
private static void writeSingleByte(FileSystem fs, Path testFile, int toWrite)
throws Exception {
OutputStream outputStream = fs.create(testFile);
outputStream.write(toWrite);
outputStream.close();
}
/**
* Reads the file given and makes sure that it's a single-byte file with the
* given value in it.
*/
private static void assertSingleByteValue(FileSystem fs, Path testFile,
int expectedValue) throws Exception {
InputStream inputStream = fs.open(testFile);
int byteRead = inputStream.read();
assertTrue("File unexpectedly empty: " + testFile, byteRead >= 0);
assertTrue("File has more than a single byte: " + testFile,
inputStream.read() < 0);
inputStream.close();
assertEquals("Unxpected content in: " + testFile, expectedValue, byteRead);
}
@Test
public void testMultipleContainers() throws Exception {
AzureBlobStorageTestAccount firstAccount = AzureBlobStorageTestAccount
.create("first"), secondAccount = AzureBlobStorageTestAccount
.create("second");
assumeNotNull(firstAccount);
assumeNotNull(secondAccount);
try {
FileSystem firstFs = firstAccount.getFileSystem(),
secondFs = secondAccount.getFileSystem();
Path testFile = new Path("/testWasb");
assertTrue(validateIOStreams(firstFs, testFile));
assertTrue(validateIOStreams(secondFs, testFile));
// Make sure that we're really dealing with two file systems here.
writeSingleByte(firstFs, testFile, 5);
writeSingleByte(secondFs, testFile, 7);
assertSingleByteValue(firstFs, testFile, 5);
assertSingleByteValue(secondFs, testFile, 7);
} finally {
firstAccount.cleanup();
secondAccount.cleanup();
}
}
@Test
public void testDefaultKeyProvider() throws Exception {
Configuration conf = new Configuration();
String account = "testacct";
String key = "testkey";
conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
String result = AzureNativeFileSystemStore.getAccountKeyFromConfiguration(
account, conf);
assertEquals(key, result);
}
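  // Note (illustrative): KEY_ACCOUNT_KEY_PREFIX is the per-account key prefix, so the
  // property set above ends up looking roughly like
  //   fs.azure.account.key.testacct = testkey
  // i.e. the same shape users would put in core-site.xml for a real storage account.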
@Test
public void testValidKeyProvider() throws Exception {
Configuration conf = new Configuration();
String account = "testacct";
String key = "testkey";
conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
conf.setClass("fs.azure.account.keyprovider." + account,
SimpleKeyProvider.class, KeyProvider.class);
String result = AzureNativeFileSystemStore.getAccountKeyFromConfiguration(
account, conf);
assertEquals(key, result);
}
@Test
public void testInvalidKeyProviderNonexistantClass() throws Exception {
Configuration conf = new Configuration();
String account = "testacct";
conf.set("fs.azure.account.keyprovider." + account,
"org.apache.Nonexistant.Class");
try {
AzureNativeFileSystemStore.getAccountKeyFromConfiguration(account, conf);
Assert.fail("Nonexistant key provider class should have thrown a "
+ "KeyProviderException");
} catch (KeyProviderException e) {
}
}
@Test
public void testInvalidKeyProviderWrongClass() throws Exception {
Configuration conf = new Configuration();
String account = "testacct";
conf.set("fs.azure.account.keyprovider." + account, "java.lang.String");
try {
AzureNativeFileSystemStore.getAccountKeyFromConfiguration(account, conf);
Assert.fail("Key provider class that doesn't implement KeyProvider "
+ "should have thrown a KeyProviderException");
} catch (KeyProviderException e) {
}
}
/**
* Tests the cases when the URI is specified with no authority, i.e.
* wasb:///path/to/file.
*/
@Test
public void testNoUriAuthority() throws Exception {
// For any combination of default FS being asv(s)/wasb(s)://c@a/ and
// the actual URI being asv(s)/wasb(s):///, it should work.
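    // Illustrative example: with fs.defaultFS set to something like
    // wasb://mycontainer@myaccount.blob.core.windows.net/, a bare path such as
    // wasb:///user/alice is expected to resolve against that default authority.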
String[] wasbAliases = new String[] { "wasb", "wasbs" };
for (String defaultScheme : wasbAliases) {
for (String wantedScheme : wasbAliases) {
testAccount = AzureBlobStorageTestAccount.createMock();
Configuration conf = testAccount.getFileSystem().getConf();
String authority = testAccount.getFileSystem().getUri().getAuthority();
URI defaultUri = new URI(defaultScheme, authority, null, null, null);
conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
// Add references to file system implementations for wasb and wasbs.
conf.addResource("azure-test.xml");
URI wantedUri = new URI(wantedScheme + ":///random/path");
NativeAzureFileSystem obtained = (NativeAzureFileSystem) FileSystem
.get(wantedUri, conf);
assertNotNull(obtained);
assertEquals(new URI(wantedScheme, authority, null, null, null),
obtained.getUri());
// Make sure makeQualified works as expected
Path qualified = obtained.makeQualified(new Path(wantedUri));
assertEquals(new URI(wantedScheme, authority, wantedUri.getPath(),
null, null), qualified.toUri());
// Cleanup for the next iteration to not cache anything in FS
testAccount.cleanup();
FileSystem.closeAll();
}
}
// If the default FS is not a WASB FS, then specifying a URI without
// authority for the Azure file system should throw.
testAccount = AzureBlobStorageTestAccount.createMock();
Configuration conf = testAccount.getFileSystem().getConf();
conf.set(FS_DEFAULT_NAME_KEY, "file:///");
try {
FileSystem.get(new URI("wasb:///random/path"), conf);
fail("Should've thrown.");
} catch (IllegalArgumentException e) {
}
}
@Test
public void testWasbAsDefaultFileSystemHasNoPort() throws Exception {
try {
testAccount = AzureBlobStorageTestAccount.createMock();
Configuration conf = testAccount.getFileSystem().getConf();
String authority = testAccount.getFileSystem().getUri().getAuthority();
URI defaultUri = new URI("wasb", authority, null, null, null);
conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
conf.addResource("azure-test.xml");
FileSystem fs = FileSystem.get(conf);
assertTrue(fs instanceof NativeAzureFileSystem);
assertEquals(-1, fs.getUri().getPort());
AbstractFileSystem afs = FileContext.getFileContext(conf)
.getDefaultFileSystem();
assertTrue(afs instanceof Wasb);
assertEquals(-1, afs.getUri().getPort());
} finally {
FileSystem.closeAll();
}
}
}
| 15,531 | 35.805687 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure.metrics;
import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_CLIENT_ERRORS;
import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_DIRECTORIES_CREATED;
import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_DOWNLOAD_LATENCY;
import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_DOWNLOAD_RATE;
import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_FILES_CREATED;
import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_FILES_DELETED;
import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_SERVER_ERRORS;
import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_UPLOAD_LATENCY;
import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_UPLOAD_RATE;
import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_WEB_RESPONSES;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeNotNull;
import static org.mockito.Matchers.argThat;
import static org.mockito.Mockito.verify;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.Date;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
import org.apache.hadoop.fs.azure.AzureException;
import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore;
import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsTag;
import org.hamcrest.BaseMatcher;
import org.hamcrest.Description;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestAzureFileSystemInstrumentation {
private FileSystem fs;
private AzureBlobStorageTestAccount testAccount;
@Before
public void setUp() throws Exception {
testAccount = AzureBlobStorageTestAccount.create();
if (testAccount != null) {
fs = testAccount.getFileSystem();
}
assumeNotNull(testAccount);
}
@After
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
fs = null;
}
}
@Test
public void testMetricTags() throws Exception {
String accountName =
testAccount.getRealAccount().getBlobEndpoint()
.getAuthority();
String containerName =
testAccount.getRealContainer().getName();
MetricsRecordBuilder myMetrics = getMyMetrics();
verify(myMetrics).add(argThat(
new TagMatcher("accountName", accountName)
));
verify(myMetrics).add(argThat(
new TagMatcher("containerName", containerName)
));
verify(myMetrics).add(argThat(
new TagMatcher("Context", "azureFileSystem")
));
verify(myMetrics).add(argThat(
new TagExistsMatcher("wasbFileSystemId")
));
}
@Test
public void testMetricsOnMkdirList() throws Exception {
long base = getBaseWebResponses();
// Create a directory
assertTrue(fs.mkdirs(new Path("a")));
// At the time of writing, it takes 1 request to create the actual directory,
// plus 2 requests per level to check that there's no blob with that name and
// 1 request per level above to create it if it doesn't exist.
// So for the path above (/user/<name>/a), it takes 2 requests each to check
// there's no blob called /user, no blob called /user/<name> and no blob
// called /user/<name>/a, and then 3 request for the creation of the three
// levels, and then 2 requests for checking/stamping the version of AS,
// totaling 11.
// Also, there's the initial 1 request for container check so total is 12.
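    // Rough tally for /user/<name>/a (illustrative): 3 levels x 2 existence checks
    // + 3 creates + 2 version check/stamp + 1 container check = 12 requests at most.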
base = assertWebResponsesInRange(base, 1, 12);
assertEquals(1,
AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_DIRECTORIES_CREATED));
// List the root contents
assertEquals(1, fs.listStatus(new Path("/")).length);
base = assertWebResponsesEquals(base, 1);
assertNoErrors();
}
private BandwidthGaugeUpdater getBandwidthGaugeUpdater() {
NativeAzureFileSystem azureFs = (NativeAzureFileSystem)fs;
AzureNativeFileSystemStore azureStore = azureFs.getStore();
return azureStore.getBandwidthGaugeUpdater();
}
private static byte[] nonZeroByteArray(int size) {
byte[] data = new byte[size];
Arrays.fill(data, (byte)5);
return data;
}
@Test
public void testMetricsOnFileCreateRead() throws Exception {
long base = getBaseWebResponses();
assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
Path filePath = new Path("/metricsTest_webResponses");
final int FILE_SIZE = 1000;
// Suppress auto-update of bandwidth metrics so we get
// to update them exactly when we want to.
getBandwidthGaugeUpdater().suppressAutoUpdate();
// Create a file
Date start = new Date();
OutputStream outputStream = fs.create(filePath);
outputStream.write(nonZeroByteArray(FILE_SIZE));
outputStream.close();
long uploadDurationMs = new Date().getTime() - start.getTime();
// The exact number of requests/responses that happen to create a file
// can vary - at the time of writing this code it takes 10
// requests/responses for the 1000 byte file (33 for 100 MB),
// plus the initial container-check request but that
// can very easily change in the future. Just assert that we do roughly
// more than 2 but less than 15.
logOpResponseCount("Creating a 1K file", base);
base = assertWebResponsesInRange(base, 2, 15);
getBandwidthGaugeUpdater().triggerUpdate(true);
long bytesWritten = AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation());
assertTrue("The bytes written in the last second " + bytesWritten +
" is pretty far from the expected range of around " + FILE_SIZE +
" bytes plus a little overhead.",
bytesWritten > (FILE_SIZE / 2) && bytesWritten < (FILE_SIZE * 2));
long totalBytesWritten = AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
assertTrue("The total bytes written " + totalBytesWritten +
" is pretty far from the expected range of around " + FILE_SIZE +
" bytes plus a little overhead.",
totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
long uploadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_RATE);
System.out.println("Upload rate: " + uploadRate + " bytes/second.");
long expectedRate = (FILE_SIZE * 1000L) / uploadDurationMs;
assertTrue("The upload rate " + uploadRate +
" is below the expected range of around " + expectedRate +
" bytes/second that the unit test observed. This should never be" +
" the case since the test underestimates the rate by looking at " +
" end-to-end time instead of just block upload time.",
uploadRate >= expectedRate);
long uploadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
WASB_UPLOAD_LATENCY);
System.out.println("Upload latency: " + uploadLatency);
long expectedLatency = uploadDurationMs; // We're uploading less than a block.
assertTrue("The upload latency " + uploadLatency +
" should be greater than zero now that I've just uploaded a file.",
uploadLatency > 0);
assertTrue("The upload latency " + uploadLatency +
" is more than the expected range of around " + expectedLatency +
" milliseconds that the unit test observed. This should never be" +
" the case since the test overestimates the latency by looking at " +
" end-to-end time instead of just block upload time.",
uploadLatency <= expectedLatency);
// Read the file
start = new Date();
InputStream inputStream = fs.open(filePath);
int count = 0;
while (inputStream.read() >= 0) {
count++;
}
inputStream.close();
long downloadDurationMs = new Date().getTime() - start.getTime();
assertEquals(FILE_SIZE, count);
// Again, exact number varies. At the time of writing this code
// it takes 4 request/responses, so just assert a rough range between
// 1 and 10.
logOpResponseCount("Reading a 1K file", base);
base = assertWebResponsesInRange(base, 1, 10);
getBandwidthGaugeUpdater().triggerUpdate(false);
long totalBytesRead = AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
assertEquals(FILE_SIZE, totalBytesRead);
long bytesRead = AzureMetricsTestUtil.getCurrentBytesRead(getInstrumentation());
assertTrue("The bytes read in the last second " + bytesRead +
" is pretty far from the expected range of around " + FILE_SIZE +
" bytes plus a little overhead.",
bytesRead > (FILE_SIZE / 2) && bytesRead < (FILE_SIZE * 2));
long downloadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_RATE);
System.out.println("Download rate: " + downloadRate + " bytes/second.");
expectedRate = (FILE_SIZE * 1000L) / downloadDurationMs;
assertTrue("The download rate " + downloadRate +
" is below the expected range of around " + expectedRate +
" bytes/second that the unit test observed. This should never be" +
" the case since the test underestimates the rate by looking at " +
" end-to-end time instead of just block download time.",
downloadRate >= expectedRate);
long downloadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
WASB_DOWNLOAD_LATENCY);
System.out.println("Download latency: " + downloadLatency);
expectedLatency = downloadDurationMs; // We're downloading less than a block.
assertTrue("The download latency " + downloadLatency +
" should be greater than zero now that I've just downloaded a file.",
downloadLatency > 0);
assertTrue("The download latency " + downloadLatency +
" is more than the expected range of around " + expectedLatency +
" milliseconds that the unit test observed. This should never be" +
" the case since the test overestimates the latency by looking at " +
" end-to-end time instead of just block download time.",
downloadLatency <= expectedLatency);
assertNoErrors();
}
@Test
public void testMetricsOnBigFileCreateRead() throws Exception {
long base = getBaseWebResponses();
assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
Path filePath = new Path("/metricsTest_webResponses");
final int FILE_SIZE = 100 * 1024 * 1024;
// Suppress auto-update of bandwidth metrics so we get
// to update them exactly when we want to.
getBandwidthGaugeUpdater().suppressAutoUpdate();
// Create a file
OutputStream outputStream = fs.create(filePath);
outputStream.write(new byte[FILE_SIZE]);
outputStream.close();
// The exact number of requests/responses that happen to create a file
// can vary - at the time of writing this code it takes 34
// requests/responses for the 100 MB file,
// plus the initial container check request, but that
// can very easily change in the future. Just assert that we do roughly
// more than 20 but less than 50.
logOpResponseCount("Creating a 100 MB file", base);
base = assertWebResponsesInRange(base, 20, 50);
getBandwidthGaugeUpdater().triggerUpdate(true);
long totalBytesWritten = AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
assertTrue("The total bytes written " + totalBytesWritten +
" is pretty far from the expected range of around " + FILE_SIZE +
" bytes plus a little overhead.",
totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
long uploadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_RATE);
System.out.println("Upload rate: " + uploadRate + " bytes/second.");
long uploadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
WASB_UPLOAD_LATENCY);
System.out.println("Upload latency: " + uploadLatency);
assertTrue("The upload latency " + uploadLatency +
" should be greater than zero now that I've just uploaded a file.",
uploadLatency > 0);
// Read the file
InputStream inputStream = fs.open(filePath);
int count = 0;
while (inputStream.read() >= 0) {
count++;
}
inputStream.close();
assertEquals(FILE_SIZE, count);
// Again, exact number varies. At the time of writing this code
// it takes 27 request/responses, so just assert a rough range between
// 20 and 40.
logOpResponseCount("Reading a 100 MB file", base);
base = assertWebResponsesInRange(base, 20, 40);
getBandwidthGaugeUpdater().triggerUpdate(false);
long totalBytesRead = AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
assertEquals(FILE_SIZE, totalBytesRead);
long downloadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_RATE);
System.out.println("Download rate: " + downloadRate + " bytes/second.");
long downloadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
WASB_DOWNLOAD_LATENCY);
System.out.println("Download latency: " + downloadLatency);
assertTrue("The download latency " + downloadLatency +
" should be greater than zero now that I've just downloaded a file.",
downloadLatency > 0);
}
@Test
public void testMetricsOnFileRename() throws Exception {
long base = getBaseWebResponses();
Path originalPath = new Path("/metricsTest_RenameStart");
Path destinationPath = new Path("/metricsTest_RenameFinal");
// Create an empty file
assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_CREATED));
assertTrue(fs.createNewFile(originalPath));
logOpResponseCount("Creating an empty file", base);
base = assertWebResponsesInRange(base, 2, 20);
assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_CREATED));
// Rename the file
assertTrue(fs.rename(originalPath, destinationPath));
// Varies: at the time of writing this code it takes 7 requests/responses.
logOpResponseCount("Renaming a file", base);
base = assertWebResponsesInRange(base, 2, 15);
assertNoErrors();
}
@Test
public void testMetricsOnFileExistsDelete() throws Exception {
long base = getBaseWebResponses();
Path filePath = new Path("/metricsTest_delete");
// Check existence
assertFalse(fs.exists(filePath));
// At the time of writing this code it takes 2 requests/responses to
// check existence, which seems excessive, plus initial request for
// container check.
logOpResponseCount("Checking file existence for non-existent file", base);
base = assertWebResponsesInRange(base, 1, 3);
// Create an empty file
assertTrue(fs.createNewFile(filePath));
base = getCurrentWebResponses();
// Check existence again
assertTrue(fs.exists(filePath));
logOpResponseCount("Checking file existence for existent file", base);
base = assertWebResponsesInRange(base, 1, 2);
// Delete the file
assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_DELETED));
assertTrue(fs.delete(filePath, false));
// At the time of writing this code it takes 4 requests/responses to
// delete, which seems excessive. Check for range 1-4 for now.
logOpResponseCount("Deleting a file", base);
base = assertWebResponsesInRange(base, 1, 4);
assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_DELETED));
assertNoErrors();
}
@Test
public void testMetricsOnDirRename() throws Exception {
long base = getBaseWebResponses();
Path originalDirName = new Path("/metricsTestDirectory_RenameStart");
Path innerFileName = new Path(originalDirName, "innerFile");
Path destDirName = new Path("/metricsTestDirectory_RenameFinal");
// Create an empty directory
assertTrue(fs.mkdirs(originalDirName));
base = getCurrentWebResponses();
// Create an inner file
assertTrue(fs.createNewFile(innerFileName));
base = getCurrentWebResponses();
// Rename the directory
assertTrue(fs.rename(originalDirName, destDirName));
// At the time of writing this code it takes 11 requests/responses
// to rename the directory with one file. Check for range 1-20 for now.
logOpResponseCount("Renaming a directory", base);
base = assertWebResponsesInRange(base, 1, 20);
assertNoErrors();
}
@Test
public void testClientErrorMetrics() throws Exception {
String directoryName = "metricsTestDirectory_ClientError";
Path directoryPath = new Path("/" + directoryName);
assertTrue(fs.mkdirs(directoryPath));
String leaseID = testAccount.acquireShortLease(directoryName);
try {
try {
fs.delete(directoryPath, true);
assertTrue("Should've thrown.", false);
} catch (AzureException ex) {
assertTrue("Unexpected exception: " + ex,
ex.getMessage().contains("lease"));
}
assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_CLIENT_ERRORS));
assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_SERVER_ERRORS));
} finally {
testAccount.releaseLease(leaseID, directoryName);
}
}
private void logOpResponseCount(String opName, long base) {
System.out.println(opName + " took " + (getCurrentWebResponses() - base) +
" web responses to complete.");
}
/**
* Gets (and asserts) the value of the wasb_web_responses counter just
* after the creation of the file system object.
*/
private long getBaseWebResponses() {
// The number of requests should start at 0
return assertWebResponsesEquals(0, 0);
}
/**
* Gets the current value of the wasb_web_responses counter.
*/
private long getCurrentWebResponses() {
return AzureMetricsTestUtil.getCurrentWebResponses(getInstrumentation());
}
/**
* Checks that the wasb_web_responses counter is at the given value.
* @param base The base value (before the operation of interest).
* @param expected The expected value for the operation of interest.
* @return The new base value now.
*/
private long assertWebResponsesEquals(long base, long expected) {
assertCounter(WASB_WEB_RESPONSES, base + expected, getMyMetrics());
return base + expected;
}
private void assertNoErrors() {
assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_CLIENT_ERRORS));
assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_SERVER_ERRORS));
}
/**
* Checks that the wasb_web_responses counter is in the given range.
* @param base The base value (before the operation of interest).
* @param inclusiveLowerLimit The lower limit for what it should increase by.
* @param inclusiveUpperLimit The upper limit for what it should increase by.
* @return The new base value now.
*/
private long assertWebResponsesInRange(long base,
long inclusiveLowerLimit,
long inclusiveUpperLimit) {
long currentResponses = getCurrentWebResponses();
long justOperation = currentResponses - base;
assertTrue(String.format(
"Web responses expected in range [%d, %d], but was %d.",
inclusiveLowerLimit, inclusiveUpperLimit, justOperation),
justOperation >= inclusiveLowerLimit &&
justOperation <= inclusiveUpperLimit);
return currentResponses;
}
/**
* Gets the metrics for the file system object.
* @return The metrics record.
*/
private MetricsRecordBuilder getMyMetrics() {
return getMetrics(getInstrumentation());
}
private AzureFileSystemInstrumentation getInstrumentation() {
return ((NativeAzureFileSystem)fs).getInstrumentation();
}
/**
* A matcher class for asserting that we got a tag with a given
* value.
*/
private static class TagMatcher extends TagExistsMatcher {
private final String tagValue;
public TagMatcher(String tagName, String tagValue) {
super(tagName);
this.tagValue = tagValue;
}
@Override
public boolean matches(MetricsTag toMatch) {
return toMatch.value().equals(tagValue);
}
@Override
public void describeTo(Description desc) {
super.describeTo(desc);
desc.appendText(" with value " + tagValue);
}
}
/**
* A matcher class for asserting that we got a tag with any value.
*/
private static class TagExistsMatcher extends BaseMatcher<MetricsTag> {
private final String tagName;
public TagExistsMatcher(String tagName) {
this.tagName = tagName;
}
@Override
public boolean matches(Object toMatch) {
MetricsTag asTag = (MetricsTag)toMatch;
return asTag.name().equals(tagName) && matches(asTag);
}
protected boolean matches(MetricsTag toMatch) {
return true;
}
@Override
public void describeTo(Description desc) {
desc.appendText("Has tag " + tagName);
}
}
/**
* A matcher class for asserting that a long value is in a
* given range.
*/
private static class InRange extends BaseMatcher<Long> {
private final long inclusiveLowerLimit;
private final long inclusiveUpperLimit;
private long obtained;
public InRange(long inclusiveLowerLimit, long inclusiveUpperLimit) {
this.inclusiveLowerLimit = inclusiveLowerLimit;
this.inclusiveUpperLimit = inclusiveUpperLimit;
}
@Override
public boolean matches(Object number) {
obtained = (Long)number;
return obtained >= inclusiveLowerLimit &&
obtained <= inclusiveUpperLimit;
}
@Override
public void describeTo(Description description) {
description.appendText("Between " + inclusiveLowerLimit +
" and " + inclusiveUpperLimit + " inclusively");
}
}
}
| 23,434 | 39.827526 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestRollingWindowAverage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure.metrics;
import static org.junit.Assert.*;
import org.junit.*;
public class TestRollingWindowAverage {
/**
* Tests the basic functionality of the class.
*/
@Test
public void testBasicFunctionality() throws Exception {
RollingWindowAverage average = new RollingWindowAverage(100);
assertEquals(0, average.getCurrentAverage()); // Nothing there yet.
average.addPoint(5);
assertEquals(5, average.getCurrentAverage()); // One point in there.
Thread.sleep(50);
average.addPoint(15);
assertEquals(10, average.getCurrentAverage()); // Two points in there.
Thread.sleep(60);
assertEquals(15, average.getCurrentAverage()); // One point retired.
Thread.sleep(50);
assertEquals(0, average.getCurrentAverage()); // Both points retired.
}
}
| 1,631 | 36.953488 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestBandwidthGaugeUpdater.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure.metrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.Date;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
public class TestBandwidthGaugeUpdater {
@Test
public void testSingleThreaded() throws Exception {
AzureFileSystemInstrumentation instrumentation =
new AzureFileSystemInstrumentation(new Configuration());
BandwidthGaugeUpdater updater =
new BandwidthGaugeUpdater(instrumentation, 1000, true);
updater.triggerUpdate(true);
assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(instrumentation));
updater.blockUploaded(new Date(), new Date(), 150);
updater.triggerUpdate(true);
assertEquals(150, AzureMetricsTestUtil.getCurrentBytesWritten(instrumentation));
updater.blockUploaded(new Date(new Date().getTime() - 10000),
new Date(), 200);
updater.triggerUpdate(true);
long currentBytes = AzureMetricsTestUtil.getCurrentBytesWritten(instrumentation);
assertTrue(
"We expect around (200/10 = 20) bytes written as the gauge value." +
"Got " + currentBytes,
currentBytes > 18 && currentBytes < 22);
updater.close();
}
@Test
public void testMultiThreaded() throws Exception {
final AzureFileSystemInstrumentation instrumentation =
new AzureFileSystemInstrumentation(new Configuration());
final BandwidthGaugeUpdater updater =
new BandwidthGaugeUpdater(instrumentation, 1000, true);
Thread[] threads = new Thread[10];
for (int i = 0; i < threads.length; i++) {
threads[i] = new Thread(new Runnable() {
@Override
public void run() {
updater.blockDownloaded(new Date(), new Date(), 10);
updater.blockDownloaded(new Date(0), new Date(0), 10);
}
});
}
for (Thread t : threads) {
t.start();
}
for (Thread t : threads) {
t.join();
}
updater.triggerUpdate(false);
assertEquals(10 * threads.length, AzureMetricsTestUtil.getCurrentBytesRead(instrumentation));
updater.close();
}
}
| 2,946 | 36.303797 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestNativeAzureFileSystemMetricsSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure.metrics;
import static org.junit.Assert.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
import org.junit.*;
/**
* Tests that the WASB-specific metrics system is working correctly.
*/
public class TestNativeAzureFileSystemMetricsSystem {
private static final String WASB_FILES_CREATED = "wasb_files_created";
private static int getFilesCreated(AzureBlobStorageTestAccount testAccount) {
return testAccount.getLatestMetricValue(WASB_FILES_CREATED, 0).intValue();
}
/**
* Tests that when we have multiple file systems created/destroyed
* metrics from each are published correctly.
* @throws Exception
*/
@Test
public void testMetricsAcrossFileSystems()
throws Exception {
AzureBlobStorageTestAccount a1, a2, a3;
a1 = AzureBlobStorageTestAccount.createMock();
assertEquals(0, getFilesCreated(a1));
a2 = AzureBlobStorageTestAccount.createMock();
assertEquals(0, getFilesCreated(a2));
a1.getFileSystem().create(new Path("/foo")).close();
a1.getFileSystem().create(new Path("/bar")).close();
a2.getFileSystem().create(new Path("/baz")).close();
assertEquals(0, getFilesCreated(a1));
assertEquals(0, getFilesCreated(a2));
a1.closeFileSystem(); // Causes the file system to close, which publishes metrics
a2.closeFileSystem();
assertEquals(2, getFilesCreated(a1));
assertEquals(1, getFilesCreated(a2));
a3 = AzureBlobStorageTestAccount.createMock();
assertEquals(0, getFilesCreated(a3));
a3.closeFileSystem();
assertEquals(0, getFilesCreated(a3));
}
@Test
public void testMetricsSourceNames() {
String name1 = NativeAzureFileSystem.newMetricsSourceName();
String name2 = NativeAzureFileSystem.newMetricsSourceName();
assertTrue(name1.startsWith("AzureFileSystemMetrics"));
assertTrue(name2.startsWith("AzureFileSystemMetrics"));
assertTrue(!name1.equals(name2));
}
@Test
public void testSkipMetricsCollection() throws Exception {
AzureBlobStorageTestAccount a;
a = AzureBlobStorageTestAccount.createMock();
a.getFileSystem().getConf().setBoolean(
NativeAzureFileSystem.SKIP_AZURE_METRICS_PROPERTY_NAME, true);
a.getFileSystem().create(new Path("/foo")).close();
a.closeFileSystem(); // Causes the file system to close, which publishes metrics
assertEquals(0, getFilesCreated(a));
}
}
| 3,318 | 36.292135 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/AzureMetricsTestUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure.metrics;
import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_BYTES_READ;
import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_BYTES_WRITTEN;
import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_RAW_BYTES_DOWNLOADED;
import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_RAW_BYTES_UPLOADED;
import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_WEB_RESPONSES;
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getLongGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
public final class AzureMetricsTestUtil {
public static long getLongGaugeValue(AzureFileSystemInstrumentation instrumentation,
String gaugeName) {
return getLongGauge(gaugeName, getMetrics(instrumentation));
}
/**
* Gets the current value of the given counter.
*/
public static long getLongCounterValue(AzureFileSystemInstrumentation instrumentation,
String counterName) {
return getLongCounter(counterName, getMetrics(instrumentation));
}
/**
* Gets the current value of the wasb_bytes_written_last_second counter.
*/
public static long getCurrentBytesWritten(AzureFileSystemInstrumentation instrumentation) {
return getLongGaugeValue(instrumentation, WASB_BYTES_WRITTEN);
}
/**
* Gets the current value of the wasb_bytes_read_last_second counter.
*/
public static long getCurrentBytesRead(AzureFileSystemInstrumentation instrumentation) {
return getLongGaugeValue(instrumentation, WASB_BYTES_READ);
}
/**
* Gets the current value of the wasb_raw_bytes_uploaded counter.
*/
public static long getCurrentTotalBytesWritten(
AzureFileSystemInstrumentation instrumentation) {
return getLongCounterValue(instrumentation, WASB_RAW_BYTES_UPLOADED);
}
/**
* Gets the current value of the wasb_raw_bytes_downloaded counter.
*/
public static long getCurrentTotalBytesRead(
AzureFileSystemInstrumentation instrumentation) {
return getLongCounterValue(instrumentation, WASB_RAW_BYTES_DOWNLOADED);
}
/**
* Gets the current value of the asv_web_responses counter.
*/
public static long getCurrentWebResponses(
AzureFileSystemInstrumentation instrumentation) {
return getLongCounter(WASB_WEB_RESPONSES, getMetrics(instrumentation));
}
}
| 3,327 | 38.619048 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import java.net.HttpURLConnection;
import java.util.Date;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.RequestResult;
import com.microsoft.azure.storage.ResponseReceivedEvent;
import com.microsoft.azure.storage.SendingRequestEvent;
import com.microsoft.azure.storage.StorageEvent;
/*
 * Self throttling is implemented by hooking into send & response callbacks.
 * One instance of this class is created per operationContext, so each
 * blobUpload/blobDownload/etc. gets its own throttling state.
 *
 * Self throttling only applies to the 2nd and subsequent packets of an operation. This is a simple way to
 * ensure it only affects bulk transfers and not every tiny request.
 *
 * A blobDownload involves sequential packet transmissions, so there are no concurrency concerns.
 * A blobUpload will generally involve concurrent upload worker threads that share one operationContext and one throttling instance.
 *   -- we do not track the latencies for each worker thread as they are doing similar work and will rarely collide in practice.
 *   -- concurrent access to lastE2Elatency must be protected.
 *   -- volatile access would be sufficient to protect simple reads and writes of primitive values (Java 1.5 onwards),
 *      but synchronized{} blocks are used instead to be conservative and for easier maintenance.
 *
 * If an operation were to perform concurrent GETs and PUTs there is the possibility of getting confused regarding
 * whether lastE2Elatency was a read or write measurement. This scenario does not occur.
 *
 * readFactor  = target read throughput as a factor of unrestricted throughput.
 * writeFactor = target write throughput as a factor of unrestricted throughput.
 *
 * As we introduce delays it is important to only measure the actual E2E latency and not the augmented latency.
 * To achieve this, we adjust the 'startDate' of the transfer tracking object.
 */
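/*
 * Worked example (illustrative numbers, not from the original source): with
 * writeFactor = 0.5, the send-side delay computed in sendingRequest() is
 *
 *   sleepMultiple = (1 / 0.5) - 1 = 1
 *   sleepDuration = sleepMultiple * lastE2Elatency
 *
 * so each PUT sleeps roughly as long as the previous request took end-to-end,
 * targeting about half of the unrestricted write throughput. A factor of 1.0
 * yields sleepMultiple = 0, i.e. no self-imposed delay.
 */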
/**
 * Introduces delays in our Azure traffic to prevent overrunning the
 * server-side throttling limits.
 */
@InterfaceAudience.Private
public class SelfThrottlingIntercept {
public static final Log LOG = LogFactory
.getLog(SelfThrottlingIntercept.class);
private final float readFactor;
private final float writeFactor;
private final OperationContext operationContext;
// Concurrency: access to non-final members must be thread-safe
private long lastE2Elatency;
public SelfThrottlingIntercept(OperationContext operationContext,
float readFactor, float writeFactor) {
this.operationContext = operationContext;
this.readFactor = readFactor;
this.writeFactor = writeFactor;
}
public static void hook(OperationContext operationContext, float readFactor,
float writeFactor) {
SelfThrottlingIntercept throttler = new SelfThrottlingIntercept(
operationContext, readFactor, writeFactor);
ResponseReceivedListener responseListener = throttler.new ResponseReceivedListener();
SendingRequestListener sendingListener = throttler.new SendingRequestListener();
operationContext.getResponseReceivedEventHandler().addListener(
responseListener);
operationContext.getSendingRequestEventHandler().addListener(
sendingListener);
}
public void responseReceived(ResponseReceivedEvent event) {
RequestResult result = event.getRequestResult();
Date startDate = result.getStartDate();
Date stopDate = result.getStopDate();
long elapsed = stopDate.getTime() - startDate.getTime();
synchronized (this) {
this.lastE2Elatency = elapsed;
}
if (LOG.isDebugEnabled()) {
int statusCode = result.getStatusCode();
String etag = result.getEtag();
HttpURLConnection urlConnection = (HttpURLConnection) event
.getConnectionObject();
int contentLength = urlConnection.getContentLength();
String requestMethod = urlConnection.getRequestMethod();
long threadId = Thread.currentThread().getId();
LOG.debug(String
.format(
"SelfThrottlingIntercept:: ResponseReceived: threadId=%d, Status=%d, Elapsed(ms)=%d, ETAG=%s, contentLength=%d, requestMethod=%s",
threadId, statusCode, elapsed, etag, contentLength, requestMethod));
}
}
public void sendingRequest(SendingRequestEvent sendEvent) {
long lastLatency;
boolean operationIsRead; // for logging
synchronized (this) {
lastLatency = this.lastE2Elatency;
}
float sleepMultiple;
HttpURLConnection urlConnection = (HttpURLConnection) sendEvent
.getConnectionObject();
// Azure REST API never uses POST, so PUT is a sufficient test for an
// upload.
if (urlConnection.getRequestMethod().equalsIgnoreCase("PUT")) {
operationIsRead = false;
sleepMultiple = (1 / writeFactor) - 1;
} else {
operationIsRead = true;
sleepMultiple = (1 / readFactor) - 1;
}
long sleepDuration = (long) (sleepMultiple * lastLatency);
if (sleepDuration < 0) {
sleepDuration = 0;
}
if (sleepDuration > 0) {
try {
        // Thread.sleep() is not exact but it seems sufficiently accurate for
        // our needs. If needed this could become a loop of small waits that
        // tracks actual elapsed time.
Thread.sleep(sleepDuration);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
// reset to avoid counting the sleep against request latency
sendEvent.getRequestResult().setStartDate(new Date());
}
if (LOG.isDebugEnabled()) {
boolean isFirstRequest = (lastLatency == 0);
long threadId = Thread.currentThread().getId();
LOG.debug(String
.format(
" SelfThrottlingIntercept:: SendingRequest: threadId=%d, requestType=%s, isFirstRequest=%b, sleepDuration=%d",
threadId, operationIsRead ? "read " : "write", isFirstRequest,
sleepDuration));
}
}
// simply forwards back to the main class.
// this is necessary as our main class cannot implement two base-classes.
@InterfaceAudience.Private
class SendingRequestListener extends StorageEvent<SendingRequestEvent> {
@Override
public void eventOccurred(SendingRequestEvent event) {
sendingRequest(event);
}
}
// simply forwards back to the main class.
// this is necessary as our main class cannot implement two base-classes.
@InterfaceAudience.Private
class ResponseReceivedListener extends StorageEvent<ResponseReceivedEvent> {
@Override
public void eventOccurred(ResponseReceivedEvent event) {
responseReceived(event);
}
}
}
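// Hypothetical wiring sketch (not part of the original file): attaching the intercept
// to an OperationContext before issuing storage calls. The factor values are arbitrary
// example choices.
//
//   OperationContext context = new OperationContext();
//   SelfThrottlingIntercept.hook(context, 1.0f /* readFactor */, 0.5f /* writeFactor */);
//   // Blob uploads/downloads issued with `context` are now self-throttled.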
| 7,629 | 37.928571 | 144 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PartialListing.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* <p>
* Holds information on a directory listing for a {@link NativeFileSystemStore}.
* This includes the {@link FileMetadata files} and directories (their names)
* contained in a directory.
* </p>
* <p>
* This listing may be returned in chunks, so a <code>priorLastKey</code> is
* provided so that the next chunk may be requested.
* </p>
*
* @see NativeFileSystemStore#list(String, int, String)
*/
@InterfaceAudience.Private
class PartialListing {
private final String priorLastKey;
private final FileMetadata[] files;
private final String[] commonPrefixes;
public PartialListing(String priorLastKey, FileMetadata[] files,
String[] commonPrefixes) {
this.priorLastKey = priorLastKey;
this.files = files;
this.commonPrefixes = commonPrefixes;
}
public FileMetadata[] getFiles() {
return files;
}
public String[] getCommonPrefixes() {
return commonPrefixes;
}
public String getPriorLastKey() {
return priorLastKey;
}
}
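// Illustrative pagination sketch (assumes a NativeFileSystemStore named `store`, a key
// `prefix`, a `maxListingCount`, and a hypothetical process() helper; the
// list(String, int, String) signature is taken from the @see reference above): callers
// loop until getPriorLastKey() returns null.
//
//   String priorLastKey = null;
//   do {
//     PartialListing listing = store.list(prefix, maxListingCount, priorLastKey);
//     for (FileMetadata file : listing.getFiles()) {
//       process(file);
//     }
//     priorLastKey = listing.getPriorLastKey();
//   } while (priorLastKey != null);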
| 1,896 | 29.596774 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureLinkedStack.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
/**
* A simple generic stack implementation using linked lists. The stack
 * implementation has six main operations:
* <ul>
* <li>push -- adds an element to the top of the stack</li>
* <li>pop -- removes an element from the top of the stack and returns a
* reference to it</li>
* <li>peek -- peek returns an element from the top of the stack without
* removing it</li>
* <li>isEmpty -- tests whether the stack is empty</li>
* <li>size -- returns the size of the stack</li>
* <li>toString -- returns a string representation of the stack.</li>
* </ul>
*/
public class AzureLinkedStack<E> {
/*
* Linked node for Azure stack collection.
*/
private static class AzureLinkedNode<E> {
    private E element; // Linked element on the list.
    private AzureLinkedNode<E> next; // Reference to the next linked element on the list.
/*
* The constructor builds the linked node with no successor
*
* @param element : The value of the element to be stored with this node.
*/
private AzureLinkedNode(E anElement) {
element = anElement;
next = null;
}
/*
* Constructor builds a linked node with a specified successor. The
* successor may be null.
*
* @param anElement : new element to be created.
*
* @param nextElement: successor to the new element.
*/
private AzureLinkedNode(E anElement, AzureLinkedNode<E> nextElement) {
element = anElement;
next = nextElement;
}
/*
* Get the element stored in the linked node.
*
* @return E : element stored in linked node.
*/
private E getElement() {
return element;
}
/*
* Get the successor node to the element.
*
* @return E : reference to the succeeding node on the list.
*/
private AzureLinkedNode<E> getNext() {
return next;
}
}
private int count; // The number of elements stored on the stack.
private AzureLinkedNode<E> top; // Top of the stack.
/*
* Constructor creating an empty stack.
*/
public AzureLinkedStack() {
// Simply initialize the member variables.
//
count = 0;
top = null;
}
/*
* Adds an element to the top of the stack.
*
* @param element : element pushed to the top of the stack.
*/
public void push(E element) {
// Create a new node containing a reference to be placed on the stack.
// Set the next reference to the new node to point to the current top
// of the stack. Set the top reference to point to the new node. Finally
// increment the count of nodes on the stack.
//
AzureLinkedNode<E> newNode = new AzureLinkedNode<E>(element, top);
top = newNode;
count++;
}
/*
* Removes the element at the top of the stack and returns a reference to it.
*
* @return E : element popped from the top of the stack.
*
* @throws Exception on pop from an empty stack.
*/
public E pop() throws Exception {
// Make sure the stack is not empty. If it is empty, throw a StackEmpty
// exception.
//
if (isEmpty()) {
throw new Exception("AzureStackEmpty");
}
// Set a temporary reference equal to the element at the top of the stack,
// decrement the count of elements and return reference to the temporary.
//
E element = top.getElement();
top = top.getNext();
count--;
// Return the reference to the element that was at the top of the stack.
//
return element;
}
/*
* Return the top element of the stack without removing it.
*
* @return E
*
* @throws Exception on peek into an empty stack.
*/
public E peek() throws Exception {
// Make sure the stack is not empty. If it is empty, throw a StackEmpty
// exception.
//
if (isEmpty()) {
throw new Exception("AzureStackEmpty");
}
// Set a temporary reference equal to the element at the top of the stack
// and return the temporary.
//
E element = top.getElement();
return element;
}
/*
* Determines whether the stack is empty
*
* @return boolean true if the stack is empty and false otherwise.
*/
  public boolean isEmpty() {
    // The stack is empty exactly when it holds zero elements.
    return size() == 0;
  }
/*
* Determines the size of the stack
*
* @return int: Count of the number of elements in the stack.
*/
public int size() {
return count;
}
/*
* Returns a string representation of the stack.
*
* @return String String representation of all elements in the stack.
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
AzureLinkedNode<E> current = top;
for (int i = 0; i < size(); i++) {
E element = current.getElement();
sb.append(element.toString());
current = current.getNext();
// Insert commas between strings except after the last string.
//
if (size() - 1 > i) {
sb.append(", ");
}
}
// Return the string.
//
return sb.toString();
}
}
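// Usage sketch (not part of the original file); note that pop() and peek() throw an
// exception when the stack is empty.
//
//   AzureLinkedStack<String> stack = new AzureLinkedStack<String>();
//   stack.push("a");
//   stack.push("b");
//   String top = stack.peek();       // "b", stack unchanged
//   String popped = stack.pop();     // "b", stack now holds only "a"
//   boolean empty = stack.isEmpty(); // false, size() == 1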
| 6,013 | 26.587156 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SimpleKeyProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
/**
* Key provider that simply returns the storage account key from the
* configuration as plaintext.
*/
@InterfaceAudience.Private
public class SimpleKeyProvider implements KeyProvider {
protected static final String KEY_ACCOUNT_KEY_PREFIX =
"fs.azure.account.key.";
@Override
public String getStorageAccountKey(String accountName, Configuration conf)
throws KeyProviderException {
return conf.get(getStorageAccountKeyName(accountName));
}
protected String getStorageAccountKeyName(String accountName) {
return KEY_ACCOUNT_KEY_PREFIX + accountName;
}
}
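// Hypothetical configuration sketch (the account name and key below are placeholders):
// the provider simply returns the value of fs.azure.account.key.<accountName>.
//
//   Configuration conf = new Configuration();
//   conf.set("fs.azure.account.key.myaccount.blob.core.windows.net", "<base64-account-key>");
//   String key = new SimpleKeyProvider()
//       .getStorageAccountKey("myaccount.blob.core.windows.net", conf);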
| 1,542 | 34.068182 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/KeyProviderException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Thrown if there is a problem instantiating a KeyProvider or retrieving a key
* using a KeyProvider object.
*/
@InterfaceAudience.Private
public class KeyProviderException extends Exception {
private static final long serialVersionUID = 1L;
public KeyProviderException(String message) {
super(message);
}
public KeyProviderException(String message, Throwable cause) {
super(message, cause);
}
public KeyProviderException(Throwable t) {
super(t);
}
}
| 1,392 | 31.395349 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Iterator;
import org.apache.hadoop.classification.InterfaceAudience;
import com.microsoft.azure.storage.AccessCondition;
import com.microsoft.azure.storage.CloudStorageAccount;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.RetryPolicyFactory;
import com.microsoft.azure.storage.StorageCredentials;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.StorageUri;
import com.microsoft.azure.storage.blob.BlobListingDetails;
import com.microsoft.azure.storage.blob.BlobProperties;
import com.microsoft.azure.storage.blob.BlobRequestOptions;
import com.microsoft.azure.storage.blob.CloudBlob;
import com.microsoft.azure.storage.blob.CloudBlobClient;
import com.microsoft.azure.storage.blob.CloudBlobContainer;
import com.microsoft.azure.storage.blob.CloudBlobDirectory;
import com.microsoft.azure.storage.blob.CloudBlockBlob;
import com.microsoft.azure.storage.blob.CloudPageBlob;
import com.microsoft.azure.storage.blob.CopyState;
import com.microsoft.azure.storage.blob.DeleteSnapshotsOption;
import com.microsoft.azure.storage.blob.ListBlobItem;
import com.microsoft.azure.storage.blob.PageRange;
/**
* A real implementation of the Azure interaction layer that just redirects
* calls to the Windows Azure storage SDK.
*/
@InterfaceAudience.Private
class StorageInterfaceImpl extends StorageInterface {
private CloudBlobClient serviceClient;
@Override
public void setRetryPolicyFactory(final RetryPolicyFactory retryPolicyFactory) {
serviceClient.getDefaultRequestOptions().setRetryPolicyFactory(
retryPolicyFactory);
}
@Override
public void setTimeoutInMs(int timeoutInMs) {
serviceClient.getDefaultRequestOptions().setTimeoutIntervalInMs(
timeoutInMs);
}
@Override
public void createBlobClient(CloudStorageAccount account) {
serviceClient = account.createCloudBlobClient();
}
@Override
public void createBlobClient(URI baseUri) {
serviceClient = new CloudBlobClient(baseUri);
}
@Override
public void createBlobClient(URI baseUri, StorageCredentials credentials) {
serviceClient = new CloudBlobClient(baseUri, credentials);
}
@Override
public StorageCredentials getCredentials() {
return serviceClient.getCredentials();
}
@Override
public CloudBlobContainerWrapper getContainerReference(String uri)
throws URISyntaxException, StorageException {
return new CloudBlobContainerWrapperImpl(
serviceClient.getContainerReference(uri));
}
//
// WrappingIterator
//
/**
   * This iterator wraps each ListBlobItem coming from the listBlobs()
   * calls in its proper wrapper object.
*/
private static class WrappingIterator implements Iterator<ListBlobItem> {
private final Iterator<ListBlobItem> present;
public WrappingIterator(Iterator<ListBlobItem> present) {
this.present = present;
}
public static Iterable<ListBlobItem> wrap(
final Iterable<ListBlobItem> present) {
return new Iterable<ListBlobItem>() {
@Override
public Iterator<ListBlobItem> iterator() {
return new WrappingIterator(present.iterator());
}
};
}
@Override
public boolean hasNext() {
return present.hasNext();
}
@Override
public ListBlobItem next() {
ListBlobItem unwrapped = present.next();
if (unwrapped instanceof CloudBlobDirectory) {
return new CloudBlobDirectoryWrapperImpl((CloudBlobDirectory) unwrapped);
} else if (unwrapped instanceof CloudBlockBlob) {
return new CloudBlockBlobWrapperImpl((CloudBlockBlob) unwrapped);
} else if (unwrapped instanceof CloudPageBlob) {
return new CloudPageBlobWrapperImpl((CloudPageBlob) unwrapped);
} else {
return unwrapped;
}
}
@Override
public void remove() {
present.remove();
}
}
//
// CloudBlobDirectoryWrapperImpl
//
@InterfaceAudience.Private
static class CloudBlobDirectoryWrapperImpl extends CloudBlobDirectoryWrapper {
private final CloudBlobDirectory directory;
public CloudBlobDirectoryWrapperImpl(CloudBlobDirectory directory) {
this.directory = directory;
}
@Override
public URI getUri() {
return directory.getUri();
}
@Override
public Iterable<ListBlobItem> listBlobs(String prefix,
boolean useFlatBlobListing, EnumSet<BlobListingDetails> listingDetails,
BlobRequestOptions options, OperationContext opContext)
throws URISyntaxException, StorageException {
return WrappingIterator.wrap(directory.listBlobs(prefix,
useFlatBlobListing, listingDetails, options, opContext));
}
@Override
public CloudBlobContainer getContainer() throws URISyntaxException,
StorageException {
return directory.getContainer();
}
@Override
public CloudBlobDirectory getParent() throws URISyntaxException,
StorageException {
return directory.getParent();
}
@Override
public StorageUri getStorageUri() {
return directory.getStorageUri();
}
}
//
// CloudBlobContainerWrapperImpl
//
@InterfaceAudience.Private
static class CloudBlobContainerWrapperImpl extends CloudBlobContainerWrapper {
private final CloudBlobContainer container;
public CloudBlobContainerWrapperImpl(CloudBlobContainer container) {
this.container = container;
}
@Override
public String getName() {
return container.getName();
}
@Override
public boolean exists(OperationContext opContext) throws StorageException {
return container.exists(AccessCondition.generateEmptyCondition(), null,
opContext);
}
@Override
public void create(OperationContext opContext) throws StorageException {
container.create(null, opContext);
}
@Override
public HashMap<String, String> getMetadata() {
return container.getMetadata();
}
@Override
public void setMetadata(HashMap<String, String> metadata) {
container.setMetadata(metadata);
}
@Override
public void downloadAttributes(OperationContext opContext)
throws StorageException {
container.downloadAttributes(AccessCondition.generateEmptyCondition(),
null, opContext);
}
@Override
public void uploadMetadata(OperationContext opContext)
throws StorageException {
container.uploadMetadata(AccessCondition.generateEmptyCondition(), null,
opContext);
}
@Override
public CloudBlobDirectoryWrapper getDirectoryReference(String relativePath)
throws URISyntaxException, StorageException {
CloudBlobDirectory dir = container.getDirectoryReference(relativePath);
return new CloudBlobDirectoryWrapperImpl(dir);
}
@Override
public CloudBlobWrapper getBlockBlobReference(String relativePath)
throws URISyntaxException, StorageException {
return new CloudBlockBlobWrapperImpl(container.getBlockBlobReference(relativePath));
}
@Override
public CloudBlobWrapper getPageBlobReference(String relativePath)
throws URISyntaxException, StorageException {
return new CloudPageBlobWrapperImpl(
container.getPageBlobReference(relativePath));
}
}
abstract static class CloudBlobWrapperImpl implements CloudBlobWrapper {
private final CloudBlob blob;
@Override
public CloudBlob getBlob() {
return blob;
}
public URI getUri() {
return getBlob().getUri();
}
protected CloudBlobWrapperImpl(CloudBlob blob) {
this.blob = blob;
}
@Override
public HashMap<String, String> getMetadata() {
return getBlob().getMetadata();
}
@Override
public void delete(OperationContext opContext, SelfRenewingLease lease)
throws StorageException {
getBlob().delete(DeleteSnapshotsOption.NONE, getLeaseCondition(lease),
null, opContext);
}
    /**
     * Return an access condition for this lease, or else null if
     * there's no lease.
     */
private AccessCondition getLeaseCondition(SelfRenewingLease lease) {
AccessCondition leaseCondition = null;
if (lease != null) {
leaseCondition = AccessCondition.generateLeaseCondition(lease.getLeaseID());
}
return leaseCondition;
}
@Override
public boolean exists(OperationContext opContext)
throws StorageException {
return getBlob().exists(null, null, opContext);
}
@Override
public void downloadAttributes(
OperationContext opContext) throws StorageException {
getBlob().downloadAttributes(null, null, opContext);
}
@Override
public BlobProperties getProperties() {
return getBlob().getProperties();
}
@Override
public void setMetadata(HashMap<String, String> metadata) {
getBlob().setMetadata(metadata);
}
@Override
public InputStream openInputStream(
BlobRequestOptions options,
OperationContext opContext) throws StorageException {
return getBlob().openInputStream(null, options, opContext);
}
public OutputStream openOutputStream(
BlobRequestOptions options,
OperationContext opContext) throws StorageException {
return ((CloudBlockBlob) getBlob()).openOutputStream(null, options, opContext);
}
public void upload(InputStream sourceStream, OperationContext opContext)
throws StorageException, IOException {
getBlob().upload(sourceStream, 0, null, null, opContext);
}
@Override
public CloudBlobContainer getContainer() throws URISyntaxException,
StorageException {
return getBlob().getContainer();
}
@Override
public CloudBlobDirectory getParent() throws URISyntaxException,
StorageException {
return getBlob().getParent();
}
@Override
public void uploadMetadata(OperationContext opContext)
throws StorageException {
getBlob().uploadMetadata(null, null, opContext);
}
public void uploadProperties(OperationContext opContext, SelfRenewingLease lease)
throws StorageException {
// Include lease in request if lease not null.
getBlob().uploadProperties(getLeaseCondition(lease), null, opContext);
}
@Override
public void setStreamMinimumReadSizeInBytes(int minimumReadSizeBytes) {
getBlob().setStreamMinimumReadSizeInBytes(minimumReadSizeBytes);
}
@Override
public void setWriteBlockSizeInBytes(int writeBlockSizeBytes) {
getBlob().setStreamWriteSizeInBytes(writeBlockSizeBytes);
}
@Override
public StorageUri getStorageUri() {
return getBlob().getStorageUri();
}
@Override
public CopyState getCopyState() {
return getBlob().getCopyState();
}
@Override
public void startCopyFromBlob(CloudBlobWrapper sourceBlob, BlobRequestOptions options,
OperationContext opContext)
throws StorageException, URISyntaxException {
getBlob().startCopyFromBlob(((CloudBlobWrapperImpl)sourceBlob).blob,
null, null, options, opContext);
}
@Override
public void downloadRange(long offset, long length, OutputStream outStream,
BlobRequestOptions options, OperationContext opContext)
throws StorageException, IOException {
getBlob().downloadRange(offset, length, outStream, null, options, opContext);
}
@Override
public SelfRenewingLease acquireLease() throws StorageException {
return new SelfRenewingLease(this);
}
}
//
// CloudBlockBlobWrapperImpl
//
static class CloudBlockBlobWrapperImpl extends CloudBlobWrapperImpl implements CloudBlockBlobWrapper {
public CloudBlockBlobWrapperImpl(CloudBlockBlob blob) {
super(blob);
}
public OutputStream openOutputStream(
BlobRequestOptions options,
OperationContext opContext) throws StorageException {
return ((CloudBlockBlob) getBlob()).openOutputStream(null, options, opContext);
}
public void upload(InputStream sourceStream, OperationContext opContext)
throws StorageException, IOException {
getBlob().upload(sourceStream, 0, null, null, opContext);
}
public void uploadProperties(OperationContext opContext)
throws StorageException {
getBlob().uploadProperties(null, null, opContext);
}
}
static class CloudPageBlobWrapperImpl extends CloudBlobWrapperImpl implements CloudPageBlobWrapper {
public CloudPageBlobWrapperImpl(CloudPageBlob blob) {
super(blob);
}
public void create(final long length, BlobRequestOptions options,
OperationContext opContext) throws StorageException {
((CloudPageBlob) getBlob()).create(length, null, options, opContext);
}
public void uploadPages(final InputStream sourceStream, final long offset,
final long length, BlobRequestOptions options, OperationContext opContext)
throws StorageException, IOException {
((CloudPageBlob) getBlob()).uploadPages(sourceStream, offset, length, null,
options, opContext);
}
public ArrayList<PageRange> downloadPageRanges(BlobRequestOptions options,
OperationContext opContext) throws StorageException {
return ((CloudPageBlob) getBlob()).downloadPageRanges(
null, options, opContext);
}
}
}
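// Hypothetical wiring sketch (not part of the original file): the concrete implementation
// is normally constructed by the WASB file system itself, but in isolation it could be
// pointed at a storage account roughly like this (connectionString is an assumed value,
// and the checked exceptions thrown by CloudStorageAccount.parse() are omitted).
//
//   StorageInterface storage = new StorageInterfaceImpl();
//   storage.createBlobClient(CloudStorageAccount.parse(connectionString));
//   StorageInterface.CloudBlobContainerWrapper container =
//       storage.getContainerReference("mycontainer");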
| 14,542 | 30.008529 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashMap;
import org.apache.hadoop.classification.InterfaceAudience;
import com.microsoft.azure.storage.CloudStorageAccount;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.RetryPolicyFactory;
import com.microsoft.azure.storage.StorageCredentials;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.BlobListingDetails;
import com.microsoft.azure.storage.blob.BlobProperties;
import com.microsoft.azure.storage.blob.BlobRequestOptions;
import com.microsoft.azure.storage.blob.CloudBlob;
import com.microsoft.azure.storage.blob.CopyState;
import com.microsoft.azure.storage.blob.ListBlobItem;
import com.microsoft.azure.storage.blob.PageRange;
/**
* This is a very thin layer over the methods exposed by the Windows Azure
 * Storage SDK that we need for the WASB implementation. This base class has a real
 * implementation that simply redirects to the SDK, and a memory-backed one
 * that's used for unit tests.
*
* IMPORTANT: all the methods here must remain very simple redirects since code
* written here can't be properly unit tested.
*/
@InterfaceAudience.Private
abstract class StorageInterface {
/**
* Sets the timeout to use when making requests to the storage service.
* <p>
* The server timeout interval begins at the time that the complete request
* has been received by the service, and the server begins processing the
* response. If the timeout interval elapses before the response is returned
* to the client, the operation times out. The timeout interval resets with
* each retry, if the request is retried.
*
* The default timeout interval for a request made via the service client is
* 90 seconds. You can change this value on the service client by setting this
* property, so that all subsequent requests made via the service client will
* use the new timeout interval. You can also change this value for an
* individual request, by setting the
* {@link RequestOptions#timeoutIntervalInMs} property.
*
* If you are downloading a large blob, you should increase the value of the
* timeout beyond the default value.
*
* @param timeoutInMs
* The timeout, in milliseconds, to use when making requests to the
* storage service.
*/
public abstract void setTimeoutInMs(int timeoutInMs);
/**
* Sets the RetryPolicyFactory object to use when making service requests.
*
* @param retryPolicyFactory
* the RetryPolicyFactory object to use when making service requests.
*/
public abstract void setRetryPolicyFactory(
final RetryPolicyFactory retryPolicyFactory);
/**
* Creates a new Blob service client.
*
*/
public abstract void createBlobClient(CloudStorageAccount account);
/**
* Creates an instance of the <code>CloudBlobClient</code> class using the
* specified Blob service endpoint.
*
* @param baseUri
* A <code>java.net.URI</code> object that represents the Blob
* service endpoint used to create the client.
*/
public abstract void createBlobClient(URI baseUri);
/**
* Creates an instance of the <code>CloudBlobClient</code> class using the
* specified Blob service endpoint and account credentials.
*
* @param baseUri
* A <code>java.net.URI</code> object that represents the Blob
* service endpoint used to create the client.
* @param credentials
* A {@link StorageCredentials} object that represents the account
* credentials.
*/
public abstract void createBlobClient(URI baseUri,
StorageCredentials credentials);
/**
* Returns the credentials for the Blob service, as configured for the storage
* account.
*
* @return A {@link StorageCredentials} object that represents the credentials
* for this storage account.
*/
public abstract StorageCredentials getCredentials();
/**
* Returns a reference to a {@link CloudBlobContainerWrapper} object that
* represents the cloud blob container for the specified address.
*
* @param name
* A <code>String</code> that represents the name of the container.
* @return A {@link CloudBlobContainerWrapper} object that represents a
* reference to the cloud blob container.
*
* @throws URISyntaxException
* If the resource URI is invalid.
* @throws StorageException
* If a storage service error occurred.
*/
public abstract CloudBlobContainerWrapper getContainerReference(String name)
throws URISyntaxException, StorageException;
/**
* A thin wrapper over the {@link CloudBlobDirectory} class that simply
* redirects calls to the real object except in unit tests.
*/
@InterfaceAudience.Private
public abstract static class CloudBlobDirectoryWrapper implements
ListBlobItem {
/**
* Returns the URI for this directory.
*
* @return A <code>java.net.URI</code> object that represents the URI for
* this directory.
*/
public abstract URI getUri();
/**
* Returns an enumerable collection of blob items whose names begin with the
* specified prefix, using the specified flat or hierarchical option,
* listing details options, request options, and operation context.
*
* @param prefix
* A <code>String</code> that represents the prefix of the blob
* name.
* @param useFlatBlobListing
* <code>true</code> to indicate that the returned list will be
* flat; <code>false</code> to indicate that the returned list will
* be hierarchical.
* @param listingDetails
* A <code>java.util.EnumSet</code> object that contains
* {@link BlobListingDetails} values that indicate whether
* snapshots, metadata, and/or uncommitted blocks are returned.
* Committed blocks are always returned.
* @param options
* A {@link BlobRequestOptions} object that specifies any
* additional options for the request. Specifying <code>null</code>
* will use the default request options from the associated service
* client ( {@link CloudBlobClient}).
* @param opContext
* An {@link OperationContext} object that represents the context
* for the current operation. This object is used to track requests
* to the storage service, and to provide additional runtime
* information about the operation.
*
* @return An enumerable collection of {@link ListBlobItem} objects that
* represent the block items whose names begin with the specified
* prefix in this directory.
*
* @throws StorageException
* If a storage service error occurred.
* @throws URISyntaxException
* If the resource URI is invalid.
*/
public abstract Iterable<ListBlobItem> listBlobs(String prefix,
boolean useFlatBlobListing, EnumSet<BlobListingDetails> listingDetails,
BlobRequestOptions options, OperationContext opContext)
throws URISyntaxException, StorageException;
}
/**
* A thin wrapper over the {@link CloudBlobContainer} class that simply
* redirects calls to the real object except in unit tests.
*/
@InterfaceAudience.Private
public abstract static class CloudBlobContainerWrapper {
/**
* Returns the name of the container.
*
* @return A <code>String</code> that represents the name of the container.
*/
public abstract String getName();
/**
* Returns a value that indicates whether the container exists, using the
* specified operation context.
*
* @param opContext
* An {@link OperationContext} object that represents the context
* for the current operation. This object is used to track requests
* to the storage service, and to provide additional runtime
* information about the operation.
*
* @return <code>true</code> if the container exists, otherwise
* <code>false</code>.
*
* @throws StorageException
* If a storage service error occurred.
*/
public abstract boolean exists(OperationContext opContext)
throws StorageException;
/**
* Returns the metadata for the container.
*
* @return A <code>java.util.HashMap</code> object that represents the
* metadata for the container.
*/
public abstract HashMap<String, String> getMetadata();
/**
* Sets the metadata for the container.
*
* @param metadata
* A <code>java.util.HashMap</code> object that represents the
* metadata being assigned to the container.
*/
public abstract void setMetadata(HashMap<String, String> metadata);
/**
* Downloads the container's attributes, which consist of metadata and
* properties, using the specified operation context.
*
* @param opContext
* An {@link OperationContext} object that represents the context
* for the current operation. This object is used to track requests
* to the storage service, and to provide additional runtime
* information about the operation.
*
* @throws StorageException
* If a storage service error occurred.
*/
public abstract void downloadAttributes(OperationContext opContext)
throws StorageException;
/**
* Uploads the container's metadata using the specified operation context.
*
* @param opContext
* An {@link OperationContext} object that represents the context
* for the current operation. This object is used to track requests
* to the storage service, and to provide additional runtime
* information about the operation.
*
* @throws StorageException
* If a storage service error occurred.
*/
public abstract void uploadMetadata(OperationContext opContext)
throws StorageException;
/**
* Creates the container using the specified operation context.
*
* @param opContext
* An {@link OperationContext} object that represents the context
* for the current operation. This object is used to track requests
* to the storage service, and to provide additional runtime
* information about the operation.
*
* @throws StorageException
* If a storage service error occurred.
*/
public abstract void create(OperationContext opContext)
throws StorageException;
/**
* Returns a wrapper for a CloudBlobDirectory.
*
* @param relativePath
* A <code>String</code> that represents the name of the directory,
* relative to the container
*
* @throws StorageException
* If a storage service error occurred.
*
* @throws URISyntaxException
* If URI syntax exception occurred.
*/
public abstract CloudBlobDirectoryWrapper getDirectoryReference(
String relativePath) throws URISyntaxException, StorageException;
/**
* Returns a wrapper for a CloudBlockBlob.
*
* @param relativePath
* A <code>String</code> that represents the name of the blob,
* relative to the container
*
* @throws StorageException
* If a storage service error occurred.
*
* @throws URISyntaxException
* If URI syntax exception occurred.
*/
public abstract CloudBlobWrapper getBlockBlobReference(
String relativePath) throws URISyntaxException, StorageException;
/**
* Returns a wrapper for a CloudPageBlob.
*
* @param relativePath
* A <code>String</code> that represents the name of the blob, relative to the container
*
* @throws StorageException
* If a storage service error occurred.
*
* @throws URISyntaxException
* If URI syntax exception occurred.
*/
public abstract CloudBlobWrapper getPageBlobReference(String relativePath)
throws URISyntaxException, StorageException;
}
/**
* A thin wrapper over the {@link CloudBlob} class that simply redirects calls
* to the real object except in unit tests.
*/
@InterfaceAudience.Private
public interface CloudBlobWrapper extends ListBlobItem {
/**
* Returns the URI for this blob.
*
* @return A <code>java.net.URI</code> object that represents the URI for
* the blob.
*/
URI getUri();
/**
* Returns the metadata for the blob.
*
* @return A <code>java.util.HashMap</code> object that represents the
* metadata for the blob.
*/
HashMap<String, String> getMetadata();
/**
* Sets the metadata for the blob.
*
* @param metadata
* A <code>java.util.HashMap</code> object that contains the
* metadata being assigned to the blob.
*/
void setMetadata(HashMap<String, String> metadata);
/**
* Copies an existing blob's contents, properties, and metadata to this instance of the <code>CloudBlob</code>
* class, using the specified operation context.
*
* @param sourceBlob
* A <code>CloudBlob</code> object that represents the source blob to copy.
* @param options
* A {@link BlobRequestOptions} object that specifies any additional options for the request. Specifying
* <code>null</code> will use the default request options from the associated service client (
* {@link CloudBlobClient}).
* @param opContext
* An {@link OperationContext} object that represents the context for the current operation. This object
* is used to track requests to the storage service, and to provide additional runtime information about
* the operation.
*
* @throws StorageException
* If a storage service error occurred.
* @throws URISyntaxException
*
*/
public abstract void startCopyFromBlob(CloudBlobWrapper sourceBlob,
BlobRequestOptions options, OperationContext opContext)
throws StorageException, URISyntaxException;
/**
* Returns the blob's copy state.
*
* @return A {@link CopyState} object that represents the copy state of the
* blob.
*/
CopyState getCopyState();
/**
* Downloads a range of bytes from the blob to the given byte buffer, using the specified request options and
* operation context.
*
* @param offset
* The byte offset to use as the starting point for the source.
* @param length
* The number of bytes to read.
* @param buffer
* The byte buffer, as an array of bytes, to which the blob bytes are downloaded.
* @param bufferOffset
* The byte offset to use as the starting point for the target.
* @param options
* A {@link BlobRequestOptions} object that specifies any additional options for the request. Specifying
* <code>null</code> will use the default request options from the associated service client (
* {@link CloudBlobClient}).
* @param opContext
* An {@link OperationContext} object that represents the context for the current operation. This object
* is used to track requests to the storage service, and to provide additional runtime information about
* the operation.
*
* @throws StorageException
* If a storage service error occurred.
*/
void downloadRange(final long offset, final long length,
final OutputStream outStream, final BlobRequestOptions options,
final OperationContext opContext)
throws StorageException, IOException;
/**
* Deletes the blob using the specified operation context.
* <p>
* A blob that has snapshots cannot be deleted unless the snapshots are also
* deleted. If a blob has snapshots, use the
* {@link DeleteSnapshotsOption#DELETE_SNAPSHOTS_ONLY} or
* {@link DeleteSnapshotsOption#INCLUDE_SNAPSHOTS} value in the
* <code>deleteSnapshotsOption</code> parameter to specify how the snapshots
* should be handled when the blob is deleted.
*
* @param opContext
* An {@link OperationContext} object that represents the context
* for the current operation. This object is used to track requests
* to the storage service, and to provide additional runtime
* information about the operation.
*
* @throws StorageException
* If a storage service error occurred.
*/
void delete(OperationContext opContext, SelfRenewingLease lease)
throws StorageException;
/**
* Checks to see if the blob exists, using the specified operation context.
*
* @param opContext
* An {@link OperationContext} object that represents the context
* for the current operation. This object is used to track requests
* to the storage service, and to provide additional runtime
* information about the operation.
*
* @return <code>true</code> if the blob exists, otherwise
* <code>false</code>.
*
* @throws StorageException
* If a storage service error occurred.
*/
boolean exists(OperationContext opContext)
throws StorageException;
/**
* Populates a blob's properties and metadata using the specified operation
* context.
* <p>
* This method populates the blob's system properties and user-defined
* metadata. Before reading a blob's properties or metadata, call this
* method or its overload to retrieve the latest values for the blob's
* properties and metadata from the Windows Azure storage service.
*
* @param opContext
* An {@link OperationContext} object that represents the context
* for the current operation. This object is used to track requests
* to the storage service, and to provide additional runtime
* information about the operation.
*
* @throws StorageException
* If a storage service error occurred.
*/
void downloadAttributes(OperationContext opContext)
throws StorageException;
/**
* Returns the blob's properties.
*
* @return A {@link BlobProperties} object that represents the properties of
* the blob.
*/
BlobProperties getProperties();
/**
* Opens a blob input stream to download the blob using the specified
* operation context.
* <p>
* Use {@link CloudBlobClient#setStreamMinimumReadSizeInBytes} to configure
* the read size.
*
* @param opContext
* An {@link OperationContext} object that represents the context
* for the current operation. This object is used to track requests
* to the storage service, and to provide additional runtime
* information about the operation.
*
* @return An <code>InputStream</code> object that represents the stream to
* use for reading from the blob.
*
* @throws StorageException
* If a storage service error occurred.
*/
InputStream openInputStream(BlobRequestOptions options,
OperationContext opContext) throws StorageException;
/**
* Uploads the blob's metadata to the storage service using the specified
* lease ID, request options, and operation context.
*
* @param opContext
* An {@link OperationContext} object that represents the context
* for the current operation. This object is used to track requests
* to the storage service, and to provide additional runtime
* information about the operation.
*
* @throws StorageException
* If a storage service error occurred.
*/
void uploadMetadata(OperationContext opContext)
throws StorageException;
void uploadProperties(OperationContext opContext,
SelfRenewingLease lease)
throws StorageException;
SelfRenewingLease acquireLease() throws StorageException;
/**
* Sets the minimum read block size to use with this Blob.
*
* @param minimumReadSizeBytes
* The maximum block size, in bytes, for reading from a block blob
* while using a {@link BlobInputStream} object, ranging from 512
* bytes to 64 MB, inclusive.
*/
void setStreamMinimumReadSizeInBytes(
int minimumReadSizeBytes);
/**
* Sets the write block size to use with this Blob.
*
* @param writeBlockSizeBytes
* The maximum block size, in bytes, for writing to a block blob
* while using a {@link BlobOutputStream} object, ranging from 1 MB
* to 4 MB, inclusive.
*
* @throws IllegalArgumentException
* If <code>writeBlockSizeInBytes</code> is less than 1 MB or
* greater than 4 MB.
*/
void setWriteBlockSizeInBytes(int writeBlockSizeBytes);
CloudBlob getBlob();
}
/**
* A thin wrapper over the {@link CloudBlockBlob} class that simply redirects calls
* to the real object except in unit tests.
*/
  public interface CloudBlockBlobWrapper
      extends CloudBlobWrapper {
/**
* Creates and opens an output stream to write data to the block blob using the specified
* operation context.
*
* @param opContext
* An {@link OperationContext} object that represents the context for the current operation. This object
* is used to track requests to the storage service, and to provide additional runtime information about
* the operation.
*
* @return A {@link BlobOutputStream} object used to write data to the blob.
*
* @throws StorageException
* If a storage service error occurred.
*/
OutputStream openOutputStream(
BlobRequestOptions options,
OperationContext opContext) throws StorageException;
}
/**
* A thin wrapper over the {@link CloudPageBlob} class that simply redirects calls
* to the real object except in unit tests.
*/
  public interface CloudPageBlobWrapper
      extends CloudBlobWrapper {
/**
* Creates a page blob using the specified request options and operation context.
*
* @param length
* The size, in bytes, of the page blob.
* @param options
* A {@link BlobRequestOptions} object that specifies any additional options for the request. Specifying
* <code>null</code> will use the default request options from the associated service client (
* {@link CloudBlobClient}).
* @param opContext
* An {@link OperationContext} object that represents the context for the current operation. This object
* is used to track requests to the storage service, and to provide additional runtime information about
* the operation.
*
* @throws IllegalArgumentException
* If the length is not a multiple of 512.
*
* @throws StorageException
* If a storage service error occurred.
*/
void create(final long length, BlobRequestOptions options,
OperationContext opContext) throws StorageException;
/**
* Uploads a range of contiguous pages, up to 4 MB in size, at the specified offset in the page blob, using the
* specified lease ID, request options, and operation context.
*
* @param sourceStream
* An <code>InputStream</code> object that represents the input stream to write to the page blob.
* @param offset
* The offset, in number of bytes, at which to begin writing the data. This value must be a multiple of
* 512.
* @param length
* The length, in bytes, of the data to write. This value must be a multiple of 512.
* @param options
* A {@link BlobRequestOptions} object that specifies any additional options for the request. Specifying
* <code>null</code> will use the default request options from the associated service client (
* {@link CloudBlobClient}).
* @param opContext
* An {@link OperationContext} object that represents the context for the current operation. This object
* is used to track requests to the storage service, and to provide additional runtime information about
* the operation.
*
* @throws IllegalArgumentException
* If the offset or length are not multiples of 512, or if the length is greater than 4 MB.
* @throws IOException
* If an I/O exception occurred.
* @throws StorageException
* If a storage service error occurred.
*/
void uploadPages(final InputStream sourceStream, final long offset,
final long length, BlobRequestOptions options,
OperationContext opContext) throws StorageException, IOException;
/**
* Returns a collection of page ranges and their starting and ending byte offsets using the specified request
* options and operation context.
*
* @param options
* A {@link BlobRequestOptions} object that specifies any additional options for the request. Specifying
* <code>null</code> will use the default request options from the associated service client (
* {@link CloudBlobClient}).
* @param opContext
* An {@link OperationContext} object that represents the context for the current operation. This object
* is used to track requests to the storage service, and to provide additional runtime information about
* the operation.
*
* @return An <code>ArrayList</code> object that represents the set of page ranges and their starting and ending
* byte offsets.
*
* @throws StorageException
* If a storage service error occurred.
*/
ArrayList<PageRange> downloadPageRanges(BlobRequestOptions options,
OperationContext opContext) throws StorageException;
void uploadMetadata(OperationContext opContext)
throws StorageException;
}
}
| 28,189 | 39.619597 | 119 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import java.io.DataInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.Charset;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.Set;
import java.util.TimeZone;
import java.util.TreeSet;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.BufferedFSInputStream;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
import org.apache.hadoop.fs.azure.metrics.AzureFileSystemMetricsSystem;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.azure.AzureException;
import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Progressable;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.JsonParseException;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.map.JsonMappingException;
import org.codehaus.jackson.map.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.microsoft.azure.storage.AccessCondition;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlob;
import com.microsoft.azure.storage.core.*;
/**
* A {@link FileSystem} for reading and writing files stored on <a
* href="http://store.azure.com/">Windows Azure</a>. This implementation is
* blob-based and stores files on Azure in their native form so they can be read
* by other Azure tools.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class NativeAzureFileSystem extends FileSystem {
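  // WASB paths typically take the form
  //   wasb://<container>@<account>.blob.core.windows.net/<path>
  // with the "wasbs" scheme (see the Secure subclass below) used when HTTPS
  // transport is required.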
private static final int USER_WX_PERMISION = 0300;
/**
* A description of a folder rename operation, including the source and
* destination keys, and descriptions of the files in the source folder.
*/
public static class FolderRenamePending {
private SelfRenewingLease folderLease;
private String srcKey;
private String dstKey;
private FileMetadata[] fileMetadata = null; // descriptions of source files
private ArrayList<String> fileStrings = null;
private NativeAzureFileSystem fs;
private static final int MAX_RENAME_PENDING_FILE_SIZE = 10000000;
private static final int FORMATTING_BUFFER = 10000;
private boolean committed;
public static final String SUFFIX = "-RenamePending.json";
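    // Overview of the atomic folder rename protocol implemented by this class:
    //   1. prepareAtomicFolderRename() takes a lease on the source folder (for
    //      atomic-rename keys) and builds a FolderRenamePending describing
    //      every file to move.
    //   2. writeFile() persists that description as <srcFolder>-RenamePending.json.
    //   3. execute() renames each inner file and then the folder blob itself.
    //   4. cleanup() deletes the -RenamePending.json file.
    // If the process dies part way through, redo() replays the remaining work
    // from the persisted JSON when a later operation encounters the pending
    // file (see conditionalRedoFolderRename()).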
// Prepare in-memory information needed to do or redo a folder rename.
public FolderRenamePending(String srcKey, String dstKey, SelfRenewingLease lease,
NativeAzureFileSystem fs) throws IOException {
this.srcKey = srcKey;
this.dstKey = dstKey;
this.folderLease = lease;
this.fs = fs;
ArrayList<FileMetadata> fileMetadataList = new ArrayList<FileMetadata>();
// List all the files in the folder.
String priorLastKey = null;
do {
PartialListing listing = fs.getStoreInterface().listAll(srcKey, AZURE_LIST_ALL,
AZURE_UNBOUNDED_DEPTH, priorLastKey);
for(FileMetadata file : listing.getFiles()) {
fileMetadataList.add(file);
}
priorLastKey = listing.getPriorLastKey();
} while (priorLastKey != null);
fileMetadata = fileMetadataList.toArray(new FileMetadata[fileMetadataList.size()]);
this.committed = true;
}
// Prepare in-memory information needed to do or redo folder rename from
    // a -RenamePending.json file read from storage. This constructor is used
    // during redo processing.
public FolderRenamePending(Path redoFile, NativeAzureFileSystem fs)
throws IllegalArgumentException, IOException {
this.fs = fs;
// open redo file
Path f = redoFile;
FSDataInputStream input = fs.open(f);
byte[] bytes = new byte[MAX_RENAME_PENDING_FILE_SIZE];
int l = input.read(bytes);
if (l < 0) {
throw new IOException(
"Error reading pending rename file contents -- no data available");
}
if (l == MAX_RENAME_PENDING_FILE_SIZE) {
throw new IOException(
"Error reading pending rename file contents -- "
+ "maximum file size exceeded");
}
String contents = new String(bytes, 0, l, Charset.forName("UTF-8"));
// parse the JSON
ObjectMapper objMapper = new ObjectMapper();
objMapper.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true);
JsonNode json = null;
try {
json = objMapper.readValue(contents, JsonNode.class);
this.committed = true;
} catch (JsonMappingException e) {
        // The -RenamePending.json file is corrupted, so we assume it was
        // not completely written and the redo operation did not commit.
this.committed = false;
} catch (JsonParseException e) {
this.committed = false;
} catch (IOException e) {
this.committed = false;
}
if (!this.committed) {
LOG.error("Deleting corruped rename pending file "
+ redoFile + "\n" + contents);
// delete the -RenamePending.json file
fs.delete(redoFile, false);
return;
}
// initialize this object's fields
ArrayList<String> fileStrList = new ArrayList<String>();
JsonNode oldFolderName = json.get("OldFolderName");
JsonNode newFolderName = json.get("NewFolderName");
if (oldFolderName == null || newFolderName == null) {
this.committed = false;
} else {
this.srcKey = oldFolderName.getTextValue();
this.dstKey = newFolderName.getTextValue();
if (this.srcKey == null || this.dstKey == null) {
this.committed = false;
} else {
JsonNode fileList = json.get("FileList");
if (fileList == null) {
this.committed = false;
} else {
for (int i = 0; i < fileList.size(); i++) {
fileStrList.add(fileList.get(i).getTextValue());
}
}
}
}
this.fileStrings = fileStrList;
}
public FileMetadata[] getFiles() {
return fileMetadata;
}
public SelfRenewingLease getFolderLease() {
return folderLease;
}
/**
* Write to disk the information needed to redo folder rename,
* in JSON format. The file name will be
* {@code wasb://<sourceFolderPrefix>/folderName-RenamePending.json}
* The file format will be:
* <pre>{@code
* {
* FormatVersion: "1.0",
     *   OperationUTCTime: "<YYYY-MM-DD HH:MM:SS.MMM>",
* OldFolderName: "<key>",
* NewFolderName: "<key>",
* FileList: [ <string> , <string> , ... ]
* }
*
* Here's a sample:
* {
* FormatVersion: "1.0",
* OperationUTCTime: "2014-07-01 23:50:35.572",
* OldFolderName: "user/ehans/folderToRename",
* NewFolderName: "user/ehans/renamedFolder",
* FileList: [
* "innerFile",
* "innerFile2"
* ]
* } }</pre>
* @throws IOException
*/
public void writeFile(FileSystem fs) throws IOException {
Path path = getRenamePendingFilePath();
if (LOG.isDebugEnabled()){
LOG.debug("Preparing to write atomic rename state to " + path.toString());
}
OutputStream output = null;
String contents = makeRenamePendingFileContents();
// Write file.
try {
output = fs.create(path);
output.write(contents.getBytes(Charset.forName("UTF-8")));
} catch (IOException e) {
throw new IOException("Unable to write RenamePending file for folder rename from "
+ srcKey + " to " + dstKey, e);
} finally {
IOUtils.cleanup(LOG, output);
}
}
/**
* Return the contents of the JSON file to represent the operations
* to be performed for a folder rename.
*/
public String makeRenamePendingFileContents() {
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
sdf.setTimeZone(TimeZone.getTimeZone("UTC"));
String time = sdf.format(new Date());
// Make file list string
StringBuilder builder = new StringBuilder();
builder.append("[\n");
for (int i = 0; i != fileMetadata.length; i++) {
if (i > 0) {
builder.append(",\n");
}
builder.append(" ");
String noPrefix = StringUtils.removeStart(fileMetadata[i].getKey(), srcKey + "/");
// Quote string file names, escaping any possible " characters or other
// necessary characters in the name.
builder.append(quote(noPrefix));
if (builder.length() >=
MAX_RENAME_PENDING_FILE_SIZE - FORMATTING_BUFFER) {
// Give up now to avoid using too much memory.
LOG.error("Internal error: Exceeded maximum rename pending file size of "
+ MAX_RENAME_PENDING_FILE_SIZE + " bytes.");
// return some bad JSON with an error message to make it human readable
return "exceeded maximum rename pending file size";
}
}
builder.append("\n ]");
String fileList = builder.toString();
// Make file contents as a string. Again, quote file names, escaping
// characters as appropriate.
String contents = "{\n"
+ " FormatVersion: \"1.0\",\n"
+ " OperationUTCTime: \"" + time + "\",\n"
+ " OldFolderName: " + quote(srcKey) + ",\n"
+ " NewFolderName: " + quote(dstKey) + ",\n"
+ " FileList: " + fileList + "\n"
+ "}\n";
return contents;
}
/**
     * Adapted from the org.codehaus.jettison.json.JSONObject.quote
     * method.
     *
     * Produce a string in double quotes with backslash sequences in all the
     * right places. Forward slashes are escaped so that the JSON text can be
     * safely embedded in HTML. In JSON text, a string cannot contain a
     * control character or an unescaped quote or backslash.
* @param string A String
* @return A String correctly formatted for insertion in a JSON text.
*/
private String quote(String string) {
if (string == null || string.length() == 0) {
return "\"\"";
}
char c = 0;
int i;
int len = string.length();
StringBuilder sb = new StringBuilder(len + 4);
String t;
sb.append('"');
for (i = 0; i < len; i += 1) {
c = string.charAt(i);
switch (c) {
case '\\':
case '"':
sb.append('\\');
sb.append(c);
break;
case '/':
sb.append('\\');
sb.append(c);
break;
case '\b':
sb.append("\\b");
break;
case '\t':
sb.append("\\t");
break;
case '\n':
sb.append("\\n");
break;
case '\f':
sb.append("\\f");
break;
case '\r':
sb.append("\\r");
break;
default:
if (c < ' ') {
t = "000" + Integer.toHexString(c);
sb.append("\\u" + t.substring(t.length() - 4));
} else {
sb.append(c);
}
}
}
sb.append('"');
return sb.toString();
}
public String getSrcKey() {
return srcKey;
}
public String getDstKey() {
return dstKey;
}
public FileMetadata getSourceMetadata() throws IOException {
return fs.getStoreInterface().retrieveMetadata(srcKey);
}
/**
* Execute a folder rename. This is the execution path followed
* when everything is working normally. See redo() for the alternate
* execution path for the case where we're recovering from a folder rename
* failure.
* @throws IOException
*/
public void execute() throws IOException {
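      // Execution order: rename every materialized child blob, then the
      // folder's own 0-byte blob, and finally refresh the last-modified time
      // of both parent folders.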
for (FileMetadata file : this.getFiles()) {
// Rename all materialized entries under the folder to point to the
// final destination.
if (file.getBlobMaterialization() == BlobMaterialization.Explicit) {
String srcName = file.getKey();
String suffix = srcName.substring((this.getSrcKey()).length());
String dstName = this.getDstKey() + suffix;
// Rename gets exclusive access (via a lease) for files
// designated for atomic rename.
// The main use case is for HBase write-ahead log (WAL) and data
// folder processing correctness. See the rename code for details.
boolean acquireLease = fs.getStoreInterface().isAtomicRenameKey(srcName);
fs.getStoreInterface().rename(srcName, dstName, acquireLease, null);
}
}
// Rename the source folder 0-byte root file itself.
FileMetadata srcMetadata2 = this.getSourceMetadata();
if (srcMetadata2.getBlobMaterialization() ==
BlobMaterialization.Explicit) {
// It already has a lease on it from the "prepare" phase so there's no
// need to get one now. Pass in existing lease to allow file delete.
fs.getStoreInterface().rename(this.getSrcKey(), this.getDstKey(),
false, folderLease);
}
// Update the last-modified time of the parent folders of both source and
// destination.
fs.updateParentFolderLastModifiedTime(srcKey);
fs.updateParentFolderLastModifiedTime(dstKey);
}
/** Clean up after execution of rename.
* @throws IOException */
public void cleanup() throws IOException {
if (fs.getStoreInterface().isAtomicRenameKey(srcKey)) {
// Remove RenamePending file
fs.delete(getRenamePendingFilePath(), false);
// Freeing source folder lease is not necessary since the source
// folder file was deleted.
}
}
private Path getRenamePendingFilePath() {
String fileName = srcKey + SUFFIX;
Path fileNamePath = keyToPath(fileName);
Path path = fs.makeAbsolute(fileNamePath);
return path;
}
/**
* Recover from a folder rename failure by redoing the intended work,
* as recorded in the -RenamePending.json file.
*
* @throws IOException
*/
public void redo() throws IOException {
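      // Recovery path: re-acquire the source folder lease (if the folder is
      // still there), make sure the destination folder exists, finish renaming
      // any remaining files, delete the source folder, and finally remove the
      // -RenamePending.json file.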
if (!committed) {
        // Nothing to do. The -RenamePending.json file should have already
        // been deleted.
return;
}
// Try to get a lease on source folder to block concurrent access to it.
// It may fail if the folder is already gone. We don't check if the
// source exists explicitly because that could recursively trigger redo
// and give an infinite recursion.
SelfRenewingLease lease = null;
boolean sourceFolderGone = false;
try {
lease = fs.leaseSourceFolder(srcKey);
} catch (AzureException e) {
// If the source folder was not found then somebody probably
// raced with us and finished the rename first, or the
// first rename failed right before deleting the rename pending
// file.
String errorCode = "";
try {
StorageException se = (StorageException) e.getCause();
errorCode = se.getErrorCode();
} catch (Exception e2) {
; // do nothing -- could not get errorCode
}
if (errorCode.equals("BlobNotFound")) {
sourceFolderGone = true;
} else {
throw new IOException(
"Unexpected error when trying to lease source folder name during "
+ "folder rename redo",
e);
}
}
if (!sourceFolderGone) {
// Make sure the target folder exists.
Path dst = fullPath(dstKey);
if (!fs.exists(dst)) {
fs.mkdirs(dst);
}
// For each file inside the folder to be renamed,
// make sure it has been renamed.
for(String fileName : fileStrings) {
finishSingleFileRename(fileName);
}
// Remove the source folder. Don't check explicitly if it exists,
// to avoid triggering redo recursively.
try {
fs.getStoreInterface().delete(srcKey, lease);
} catch (Exception e) {
LOG.info("Unable to delete source folder during folder rename redo. "
+ "If the source folder is already gone, this is not an error "
+ "condition. Continuing with redo.", e);
}
// Update the last-modified time of the parent folders of both source
// and destination.
fs.updateParentFolderLastModifiedTime(srcKey);
fs.updateParentFolderLastModifiedTime(dstKey);
}
// Remove the -RenamePending.json file.
fs.delete(getRenamePendingFilePath(), false);
}
// See if the source file is still there, and if it is, rename it.
private void finishSingleFileRename(String fileName)
throws IOException {
Path srcFile = fullPath(srcKey, fileName);
Path dstFile = fullPath(dstKey, fileName);
boolean srcExists = fs.exists(srcFile);
boolean dstExists = fs.exists(dstFile);
if (srcExists && !dstExists) {
// Rename gets exclusive access (via a lease) for HBase write-ahead log
// (WAL) file processing correctness. See the rename code for details.
String srcName = fs.pathToKey(srcFile);
String dstName = fs.pathToKey(dstFile);
fs.getStoreInterface().rename(srcName, dstName, true, null);
} else if (srcExists && dstExists) {
// Get a lease on source to block write access.
String srcName = fs.pathToKey(srcFile);
SelfRenewingLease lease = fs.acquireLease(srcFile);
// Delete the file. This will free the lease too.
fs.getStoreInterface().delete(srcName, lease);
} else if (!srcExists && dstExists) {
// The rename already finished, so do nothing.
;
} else {
throw new IOException(
"Attempting to complete rename of file " + srcKey + "/" + fileName
+ " during folder rename redo, and file was not found in source "
+ "or destination.");
}
}
// Return an absolute path for the specific fileName within the folder
// specified by folderKey.
private Path fullPath(String folderKey, String fileName) {
return new Path(new Path(fs.getUri()), "/" + folderKey + "/" + fileName);
}
private Path fullPath(String fileKey) {
return new Path(new Path(fs.getUri()), "/" + fileKey);
}
}
private static final String TRAILING_PERIOD_PLACEHOLDER = "[[.]]";
private static final Pattern TRAILING_PERIOD_PLACEHOLDER_PATTERN =
Pattern.compile("\\[\\[\\.\\]\\](?=$|/)");
private static final Pattern TRAILING_PERIOD_PATTERN = Pattern.compile("\\.(?=$|/)");
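  // Example: a path with components ending in a period, such as "foo./bar.",
  // is stored as "foo[[.]]/bar[[.]]" and decoded back on the way out (see
  // encodeTrailingPeriod/decodeTrailingPeriod below).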
@Override
public String getScheme() {
return "wasb";
}
/**
* <p>
* A {@link FileSystem} for reading and writing files stored on <a
* href="http://store.azure.com/">Windows Azure</a>. This implementation is
* blob-based and stores files on Azure in their native form so they can be read
* by other Azure tools. This implementation uses HTTPS for secure network communication.
* </p>
*/
public static class Secure extends NativeAzureFileSystem {
@Override
public String getScheme() {
return "wasbs";
}
}
public static final Log LOG = LogFactory.getLog(NativeAzureFileSystem.class);
static final String AZURE_BLOCK_SIZE_PROPERTY_NAME = "fs.azure.block.size";
/**
   * The time span in seconds after which we consider a temp blob to be
* dangling (not being actively uploaded to) and up for reclamation.
*
* So e.g. if this is 60, then any temporary blobs more than a minute old
* would be considered dangling.
*/
static final String AZURE_TEMP_EXPIRY_PROPERTY_NAME = "fs.azure.fsck.temp.expiry.seconds";
private static final int AZURE_TEMP_EXPIRY_DEFAULT = 3600;
static final String PATH_DELIMITER = Path.SEPARATOR;
static final String AZURE_TEMP_FOLDER = "_$azuretmpfolder$";
private static final int AZURE_LIST_ALL = -1;
private static final int AZURE_UNBOUNDED_DEPTH = -1;
private static final long MAX_AZURE_BLOCK_SIZE = 512 * 1024 * 1024L;
/**
* The configuration property that determines which group owns files created
* in WASB.
*/
private static final String AZURE_DEFAULT_GROUP_PROPERTY_NAME = "fs.azure.permissions.supergroup";
/**
* The default value for fs.azure.permissions.supergroup. Chosen as the same
* default as DFS.
*/
static final String AZURE_DEFAULT_GROUP_DEFAULT = "supergroup";
static final String AZURE_BLOCK_LOCATION_HOST_PROPERTY_NAME =
"fs.azure.block.location.impersonatedhost";
private static final String AZURE_BLOCK_LOCATION_HOST_DEFAULT =
"localhost";
static final String AZURE_RINGBUFFER_CAPACITY_PROPERTY_NAME =
"fs.azure.ring.buffer.capacity";
static final String AZURE_OUTPUT_STREAM_BUFFER_SIZE_PROPERTY_NAME =
"fs.azure.output.stream.buffer.size";
public static final String SKIP_AZURE_METRICS_PROPERTY_NAME = "fs.azure.skip.metrics";
private class NativeAzureFsInputStream extends FSInputStream {
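    // Wraps the InputStream handed back by the store. The current offset is
    // tracked locally in pos so that getPos() and (for block blobs)
    // available() can be answered from the cached file length.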
private InputStream in;
private final String key;
private long pos = 0;
private boolean closed = false;
private boolean isPageBlob;
// File length, valid only for streams over block blobs.
private long fileLength;
public NativeAzureFsInputStream(DataInputStream in, String key, long fileLength) {
this.in = in;
this.key = key;
this.isPageBlob = store.isPageBlobKey(key);
this.fileLength = fileLength;
}
/**
* Return the size of the remaining available bytes
* if the size is less than or equal to {@link Integer#MAX_VALUE},
* otherwise, return {@link Integer#MAX_VALUE}.
*
* This is to match the behavior of DFSInputStream.available(),
* which some clients may rely on (HBase write-ahead log reading in
* particular).
*/
@Override
public synchronized int available() throws IOException {
if (isPageBlob) {
return in.available();
} else {
if (closed) {
throw new IOException("Stream closed");
}
final long remaining = this.fileLength - pos;
return remaining <= Integer.MAX_VALUE ?
(int) remaining : Integer.MAX_VALUE;
}
}
/*
* Reads the next byte of data from the input stream. The value byte is
* returned as an integer in the range 0 to 255. If no byte is available
* because the end of the stream has been reached, the value -1 is returned.
* This method blocks until input data is available, the end of the stream
* is detected, or an exception is thrown.
*
* @returns int An integer corresponding to the byte read.
*/
@Override
public synchronized int read() throws IOException {
int result = 0;
result = in.read();
if (result != -1) {
pos++;
if (statistics != null) {
statistics.incrementBytesRead(1);
}
}
// Return to the caller with the result.
//
return result;
}
/*
* Reads up to len bytes of data from the input stream into an array of
* bytes. An attempt is made to read as many as len bytes, but a smaller
* number may be read. The number of bytes actually read is returned as an
* integer. This method blocks until input data is available, end of file is
* detected, or an exception is thrown. If len is zero, then no bytes are
* read and 0 is returned; otherwise, there is an attempt to read at least
* one byte. If no byte is available because the stream is at end of file,
* the value -1 is returned; otherwise, at least one byte is read and stored
* into b.
*
* @param b -- the buffer into which data is read
*
* @param off -- the start offset in the array b at which data is written
*
* @param len -- the maximum number of bytes read
*
     * @returns int The total number of bytes read into the buffer, or -1 if
* there is no more data because the end of stream is reached.
*/
@Override
public synchronized int read(byte[] b, int off, int len) throws IOException {
int result = 0;
result = in.read(b, off, len);
      if (result > 0) {
        pos += result;
        if (null != statistics) {
          statistics.incrementBytesRead(result);
        }
      }
// Return to the caller with the result.
return result;
}
@Override
public void close() throws IOException {
in.close();
closed = true;
}
@Override
public synchronized void seek(long pos) throws IOException {
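      // Seek is implemented by closing the current stream, re-opening the
      // blob stream via the store, and skipping forward; this.pos records the
      // number of bytes actually skipped.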
in.close();
in = store.retrieve(key);
this.pos = in.skip(pos);
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Seek to position %d. Bytes skipped %d", pos,
this.pos));
}
}
@Override
public synchronized long getPos() throws IOException {
return pos;
}
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
return false;
}
}
private class NativeAzureFsOutputStream extends OutputStream {
// We should not override flush() to actually close current block and flush
    // to DFS, as this will break applications that assume flush() is a no-op.
// Applications are advised to use Syncable.hflush() for that purpose.
// NativeAzureFsOutputStream needs to implement Syncable if needed.
private String key;
private String keyEncoded;
private OutputStream out;
public NativeAzureFsOutputStream(OutputStream out, String aKey,
String anEncodedKey) throws IOException {
// Check input arguments. The output stream should be non-null and the
// keys
// should be valid strings.
if (null == out) {
throw new IllegalArgumentException(
"Illegal argument: the output stream is null.");
}
if (null == aKey || 0 == aKey.length()) {
throw new IllegalArgumentException(
"Illegal argument the key string is null or empty");
}
if (null == anEncodedKey || 0 == anEncodedKey.length()) {
throw new IllegalArgumentException(
"Illegal argument the encoded key string is null or empty");
}
// Initialize the member variables with the incoming parameters.
this.out = out;
setKey(aKey);
setEncodedKey(anEncodedKey);
}
@Override
public synchronized void close() throws IOException {
if (out != null) {
// Close the output stream and decode the key for the output stream
// before returning to the caller.
//
out.close();
restoreKey();
out = null;
}
}
/**
* Writes the specified byte to this output stream. The general contract for
* write is that one byte is written to the output stream. The byte to be
* written is the eight low-order bits of the argument b. The 24 high-order
* bits of b are ignored.
*
* @param b
     *          the byte to write; only the low-order 8 bits are used.
*/
@Override
public void write(int b) throws IOException {
out.write(b);
}
/**
* Writes b.length bytes from the specified byte array to this output
* stream. The general contract for write(b) is that it should have exactly
* the same effect as the call write(b, 0, b.length).
*
* @param b
* Block of bytes to be written to the output stream.
*/
@Override
public void write(byte[] b) throws IOException {
out.write(b);
}
/**
* Writes <code>len</code> from the specified byte array starting at offset
* <code>off</code> to the output stream. The general contract for write(b,
* off, len) is that some of the bytes in the array <code>
* b</code b> are written to the output stream in order; element
* <code>b[off]</code> is the first byte written and
* <code>b[off+len-1]</code> is the last byte written by this operation.
*
* @param b
* Byte array to be written.
* @param off
* Write this offset in stream.
* @param len
* Number of bytes to be written.
*/
@Override
public void write(byte[] b, int off, int len) throws IOException {
out.write(b, off, len);
}
/**
* Get the blob name.
*
* @return String Blob name.
*/
public String getKey() {
return key;
}
/**
* Set the blob name.
*
* @param key
* Blob name.
*/
public void setKey(String key) {
this.key = key;
}
/**
* Get the blob name.
*
* @return String Blob name.
*/
public String getEncodedKey() {
return keyEncoded;
}
/**
* Set the blob name.
*
* @param anEncodedKey
* Blob name.
*/
public void setEncodedKey(String anEncodedKey) {
this.keyEncoded = anEncodedKey;
}
/**
     * Restore the original key name from the key member variable. Note: The
     * output file stream is created with an encoded blob store key to guarantee
     * load balancing on the front end of the Azure storage partition servers.
     * The create also includes the name of the original key value which is
     * stored in the key member variable. This method should only be called
* when the stream is closed.
*/
private void restoreKey() throws IOException {
store.rename(getEncodedKey(), getKey());
}
}
private URI uri;
private NativeFileSystemStore store;
private AzureNativeFileSystemStore actualStore;
private Path workingDir;
private long blockSize = MAX_AZURE_BLOCK_SIZE;
private AzureFileSystemInstrumentation instrumentation;
private String metricsSourceName;
private boolean isClosed = false;
private static boolean suppressRetryPolicy = false;
// A counter to create unique (within-process) names for my metrics sources.
private static AtomicInteger metricsSourceNameCounter = new AtomicInteger();
public NativeAzureFileSystem() {
// set store in initialize()
}
public NativeAzureFileSystem(NativeFileSystemStore store) {
this.store = store;
}
/**
* Suppress the default retry policy for the Storage, useful in unit tests to
* test negative cases without waiting forever.
*/
@VisibleForTesting
static void suppressRetryPolicy() {
suppressRetryPolicy = true;
}
/**
* Undo the effect of suppressRetryPolicy.
*/
@VisibleForTesting
static void resumeRetryPolicy() {
suppressRetryPolicy = false;
}
/**
* Creates a new metrics source name that's unique within this process.
*/
@VisibleForTesting
public static String newMetricsSourceName() {
int number = metricsSourceNameCounter.incrementAndGet();
final String baseName = "AzureFileSystemMetrics";
if (number == 1) { // No need for a suffix for the first one
return baseName;
} else {
return baseName + number;
}
}
/**
* Checks if the given URI scheme is a scheme that's affiliated with the Azure
* File System.
*
* @param scheme
* The URI scheme.
* @return true iff it's an Azure File System URI scheme.
*/
private static boolean isWasbScheme(String scheme) {
// The valid schemes are: asv (old name), asvs (old name over HTTPS),
// wasb (new name), wasbs (new name over HTTPS).
return scheme != null
&& (scheme.equalsIgnoreCase("asv") || scheme.equalsIgnoreCase("asvs")
|| scheme.equalsIgnoreCase("wasb") || scheme
.equalsIgnoreCase("wasbs"));
}
/**
* Puts in the authority of the default file system if it is a WASB file
* system and the given URI's authority is null.
*
* @return The URI with reconstructed authority if necessary and possible.
*/
private static URI reconstructAuthorityIfNeeded(URI uri, Configuration conf) {
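    // For example, if WASB is the default file system, an authority-less URI
    // such as "wasb:///user/foo" is rebuilt using the default file system's
    // authority, yielding something like
    // "wasb://container@account.blob.core.windows.net/user/foo".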
if (null == uri.getAuthority()) {
// If WASB is the default file system, get the authority from there
URI defaultUri = FileSystem.getDefaultUri(conf);
if (defaultUri != null && isWasbScheme(defaultUri.getScheme())) {
try {
// Reconstruct the URI with the authority from the default URI.
return new URI(uri.getScheme(), defaultUri.getAuthority(),
uri.getPath(), uri.getQuery(), uri.getFragment());
} catch (URISyntaxException e) {
// This should never happen.
throw new Error("Bad URI construction", e);
}
}
}
return uri;
}
@Override
protected void checkPath(Path path) {
// Make sure to reconstruct the path's authority if needed
super.checkPath(new Path(reconstructAuthorityIfNeeded(path.toUri(),
getConf())));
}
@Override
public void initialize(URI uri, Configuration conf)
throws IOException, IllegalArgumentException {
// Check authority for the URI to guarantee that it is non-null.
uri = reconstructAuthorityIfNeeded(uri, conf);
if (null == uri.getAuthority()) {
final String errMsg = String
.format("Cannot initialize WASB file system, URI authority not recognized.");
throw new IllegalArgumentException(errMsg);
}
super.initialize(uri, conf);
if (store == null) {
store = createDefaultStore(conf);
}
instrumentation = new AzureFileSystemInstrumentation(conf);
if(!conf.getBoolean(SKIP_AZURE_METRICS_PROPERTY_NAME, false)) {
// Make sure the metrics system is available before interacting with Azure
AzureFileSystemMetricsSystem.fileSystemStarted();
metricsSourceName = newMetricsSourceName();
String sourceDesc = "Azure Storage Volume File System metrics";
AzureFileSystemMetricsSystem.registerSource(metricsSourceName, sourceDesc,
instrumentation);
}
store.initialize(uri, conf, instrumentation);
setConf(conf);
this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
this.workingDir = new Path("/user", UserGroupInformation.getCurrentUser()
.getShortUserName()).makeQualified(getUri(), getWorkingDirectory());
this.blockSize = conf.getLong(AZURE_BLOCK_SIZE_PROPERTY_NAME,
MAX_AZURE_BLOCK_SIZE);
if (LOG.isDebugEnabled()) {
LOG.debug("NativeAzureFileSystem. Initializing.");
LOG.debug(" blockSize = "
+ conf.getLong(AZURE_BLOCK_SIZE_PROPERTY_NAME, MAX_AZURE_BLOCK_SIZE));
}
}
private NativeFileSystemStore createDefaultStore(Configuration conf) {
actualStore = new AzureNativeFileSystemStore();
if (suppressRetryPolicy) {
actualStore.suppressRetryPolicy();
}
return actualStore;
}
/**
* Azure Storage doesn't allow the blob names to end in a period,
* so encode this here to work around that limitation.
*/
private static String encodeTrailingPeriod(String toEncode) {
Matcher matcher = TRAILING_PERIOD_PATTERN.matcher(toEncode);
return matcher.replaceAll(TRAILING_PERIOD_PLACEHOLDER);
}
/**
* Reverse the encoding done by encodeTrailingPeriod().
*/
private static String decodeTrailingPeriod(String toDecode) {
Matcher matcher = TRAILING_PERIOD_PLACEHOLDER_PATTERN.matcher(toDecode);
return matcher.replaceAll(".");
}
/**
* Convert the path to a key. By convention, any leading or trailing slash is
* removed, except for the special case of a single slash.
*/
@VisibleForTesting
public String pathToKey(Path path) {
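    // For example, "wasb://container@account/foo/bar" maps to the key
    // "foo/bar", while the container root maps to the single-character
    // key "/".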
// Convert the path to a URI to parse the scheme, the authority, and the
// path from the path object.
URI tmpUri = path.toUri();
String pathUri = tmpUri.getPath();
    // The scheme and authority are valid. If the URI path is empty, add a "/"
    // separator to list the root of the container.
Path newPath = path;
if ("".equals(pathUri)) {
newPath = new Path(tmpUri.toString() + Path.SEPARATOR);
}
// Verify path is absolute if the path refers to a windows drive scheme.
if (!newPath.isAbsolute()) {
throw new IllegalArgumentException("Path must be absolute: " + path);
}
String key = null;
key = newPath.toUri().getPath();
key = removeTrailingSlash(key);
key = encodeTrailingPeriod(key);
if (key.length() == 1) {
return key;
} else {
return key.substring(1); // remove initial slash
}
}
// Remove any trailing slash except for the case of a single slash.
private static String removeTrailingSlash(String key) {
if (key.length() == 0 || key.length() == 1) {
return key;
}
if (key.charAt(key.length() - 1) == '/') {
return key.substring(0, key.length() - 1);
} else {
return key;
}
}
private static Path keyToPath(String key) {
if (key.equals("/")) {
return new Path("/"); // container
}
return new Path("/" + decodeTrailingPeriod(key));
}
/**
* Get the absolute version of the path (fully qualified).
* This is public for testing purposes.
*
* @param path
* @return fully qualified path
*/
@VisibleForTesting
public Path makeAbsolute(Path path) {
if (path.isAbsolute()) {
return path;
}
return new Path(workingDir, path);
}
/**
* For unit test purposes, retrieves the AzureNativeFileSystemStore store
* backing this file system.
*
* @return The store object.
*/
@VisibleForTesting
public AzureNativeFileSystemStore getStore() {
return actualStore;
}
NativeFileSystemStore getStoreInterface() {
return store;
}
/**
* Gets the metrics source for this file system.
* This is mainly here for unit testing purposes.
*
* @return the metrics source.
*/
public AzureFileSystemInstrumentation getInstrumentation() {
return instrumentation;
}
/** This optional operation is not yet supported. */
@Override
public FSDataOutputStream append(Path f, int bufferSize, Progressable progress)
throws IOException {
throw new IOException("Not supported");
}
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return create(f, permission, overwrite, true,
bufferSize, replication, blockSize, progress,
(SelfRenewingLease) null);
}
/**
* Get a self-renewing lease on the specified file.
*/
public SelfRenewingLease acquireLease(Path path) throws AzureException {
String fullKey = pathToKey(makeAbsolute(path));
return getStore().acquireLease(fullKey);
}
@Override
@SuppressWarnings("deprecation")
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
Path parent = f.getParent();
// Get exclusive access to folder if this is a directory designated
    // for atomic rename. The primary use case is HBase write-ahead
// log file management.
SelfRenewingLease lease = null;
if (store.isAtomicRenameKey(pathToKey(f))) {
try {
lease = acquireLease(parent);
} catch (AzureException e) {
String errorCode = "";
try {
StorageException e2 = (StorageException) e.getCause();
errorCode = e2.getErrorCode();
} catch (Exception e3) {
// do nothing if cast fails
}
if (errorCode.equals("BlobNotFound")) {
throw new FileNotFoundException("Cannot create file " +
f.getName() + " because parent folder does not exist.");
}
LOG.warn("Got unexpected exception trying to get lease on "
+ pathToKey(parent) + ". " + e.getMessage());
throw e;
}
}
// See if the parent folder exists. If not, throw error.
// The exists() check will push any pending rename operation forward,
// if there is one, and return false.
//
// At this point, we have exclusive access to the source folder
// via the lease, so we will not conflict with an active folder
// rename operation.
if (!exists(parent)) {
try {
// This'll let the keep-alive thread exit as soon as it wakes up.
lease.free();
} catch (Exception e) {
LOG.warn("Unable to free lease because: " + e.getMessage());
}
throw new FileNotFoundException("Cannot create file " +
f.getName() + " because parent folder does not exist.");
}
// Create file inside folder.
FSDataOutputStream out = null;
try {
out = create(f, permission, overwrite, false,
bufferSize, replication, blockSize, progress, lease);
} finally {
// Release exclusive access to folder.
try {
if (lease != null) {
lease.free();
}
} catch (Exception e) {
IOUtils.cleanup(LOG, out);
String msg = "Unable to free lease on " + parent.toUri();
LOG.error(msg);
throw new IOException(msg, e);
}
}
return out;
}
@Override
@SuppressWarnings("deprecation")
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
    // Check if the file should be appended or overwritten. Assume that the
    // file is overwritten only if both the CREATE and OVERWRITE create flags
    // are set. Any other combination of create flags will result in opening
    // a new file or opening with append.
final EnumSet<CreateFlag> createflags =
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
boolean overwrite = flags.containsAll(createflags);
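    // e.g. EnumSet.of(CREATE, OVERWRITE) yields overwrite == true, while
    // EnumSet.of(CREATE) or EnumSet.of(APPEND) leaves overwrite == false.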
// Delegate the create non-recursive call.
return this.createNonRecursive(f, permission, overwrite,
bufferSize, replication, blockSize, progress);
}
@Override
@SuppressWarnings("deprecation")
public FSDataOutputStream createNonRecursive(Path f,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return this.createNonRecursive(f, FsPermission.getFileDefault(),
overwrite, bufferSize, replication, blockSize, progress);
}
/**
* Create an Azure blob and return an output stream to use
* to write data to it.
*
* @param f
* @param permission
* @param overwrite
* @param createParent
* @param bufferSize
* @param replication
* @param blockSize
* @param progress
* @param parentFolderLease Lease on parent folder (or null if
* no lease).
* @return
* @throws IOException
*/
private FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, boolean createParent, int bufferSize,
short replication, long blockSize, Progressable progress,
SelfRenewingLease parentFolderLease)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Creating file: " + f.toString());
}
if (containsColon(f)) {
throw new IOException("Cannot create file " + f
+ " through WASB that has colons in the name");
}
Path absolutePath = makeAbsolute(f);
String key = pathToKey(absolutePath);
FileMetadata existingMetadata = store.retrieveMetadata(key);
if (existingMetadata != null) {
if (existingMetadata.isDir()) {
throw new IOException("Cannot create file " + f
+ "; already exists as a directory.");
}
if (!overwrite) {
throw new IOException("File already exists:" + f);
}
}
Path parentFolder = absolutePath.getParent();
if (parentFolder != null && parentFolder.getParent() != null) { // skip root
// Update the parent folder last modified time if the parent folder
// already exists.
String parentKey = pathToKey(parentFolder);
FileMetadata parentMetadata = store.retrieveMetadata(parentKey);
if (parentMetadata != null && parentMetadata.isDir() &&
parentMetadata.getBlobMaterialization() == BlobMaterialization.Explicit) {
if (parentFolderLease != null) {
store.updateFolderLastModifiedTime(parentKey, parentFolderLease);
} else {
updateParentFolderLastModifiedTime(key);
}
} else {
// Make sure that the parent folder exists.
// Create it using inherited permissions from the first existing directory going up the path
Path firstExisting = parentFolder.getParent();
FileMetadata metadata = store.retrieveMetadata(pathToKey(firstExisting));
while(metadata == null) {
// Guaranteed to terminate properly because we will eventually hit root, which will return non-null metadata
firstExisting = firstExisting.getParent();
metadata = store.retrieveMetadata(pathToKey(firstExisting));
}
mkdirs(parentFolder, metadata.getPermissionStatus().getPermission(), true);
}
}
// Mask the permission first (with the default permission mask as well).
FsPermission masked = applyUMask(permission, UMaskApplyMode.NewFile);
PermissionStatus permissionStatus = createPermissionStatus(masked);
OutputStream bufOutStream;
if (store.isPageBlobKey(key)) {
// Store page blobs directly in-place without renames.
bufOutStream = store.storefile(key, permissionStatus);
} else {
// This is a block blob, so open the output blob stream based on the
// encoded key.
//
String keyEncoded = encodeKey(key);
// First create a blob at the real key, pointing back to the temporary file
// This accomplishes a few things:
// 1. Makes sure we can create a file there.
// 2. Makes it visible to other concurrent threads/processes/nodes what
// we're
// doing.
// 3. Makes it easier to restore/cleanup data in the event of us crashing.
store.storeEmptyLinkFile(key, keyEncoded, permissionStatus);
// The key is encoded to point to a common container at the storage server.
// This reduces the number of splits on the server side when load balancing.
// Ingress to Azure storage can take advantage of earlier splits. We remove
// the root path to the key and prefix a random GUID to the tail (or leaf
// filename) of the key. Keys are thus broadly and randomly distributed over
// a single container to ease load balancing on the storage server. When the
// blob is committed it is renamed to its earlier key. Uncommitted blocks
// are not cleaned up and we leave it to Azure storage to garbage collect
// these
// blocks.
bufOutStream = new NativeAzureFsOutputStream(store.storefile(
keyEncoded, permissionStatus), key, keyEncoded);
}
// Construct the data output stream from the buffered output stream.
FSDataOutputStream fsOut = new FSDataOutputStream(bufOutStream, statistics);
// Increment the counter
instrumentation.fileCreated();
// Return data output stream to caller.
return fsOut;
}
@Override
@Deprecated
public boolean delete(Path path) throws IOException {
return delete(path, true);
}
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
return delete(f, recursive, false);
}
/**
* Delete the specified file or folder. The parameter
* skipParentFolderLastModifidedTimeUpdate
* is used in the case of atomic folder rename redo. In that case, there is
* a lease on the parent folder, so (without reworking the code) modifying
* the parent folder update time will fail because of a conflict with the
   * lease. Since we are going to delete the folder soon anyway, an accurate
   * modified time is not necessary, so it's easier to just skip
   * the modified time update.
*
* @param f
* @param recursive
* @param skipParentFolderLastModifidedTimeUpdate If true, don't update the folder last
* modified time.
* @return true if and only if the file is deleted
* @throws IOException
*/
public boolean delete(Path f, boolean recursive,
boolean skipParentFolderLastModifidedTimeUpdate) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Deleting file: " + f.toString());
}
Path absolutePath = makeAbsolute(f);
String key = pathToKey(absolutePath);
// Capture the metadata for the path.
//
FileMetadata metaFile = store.retrieveMetadata(key);
if (null == metaFile) {
// The path to be deleted does not exist.
return false;
}
// The path exists, determine if it is a folder containing objects,
// an empty folder, or a simple file and take the appropriate actions.
if (!metaFile.isDir()) {
// The path specifies a file. We need to check the parent path
// to make sure it's a proper materialized directory before we
// delete the file. Otherwise we may get into a situation where
// the file we were deleting was the last one in an implicit directory
// (e.g. the blob store only contains the blob a/b and there's no
// corresponding directory blob a) and that would implicitly delete
// the directory as well, which is not correct.
Path parentPath = absolutePath.getParent();
if (parentPath.getParent() != null) {// Not root
String parentKey = pathToKey(parentPath);
FileMetadata parentMetadata = store.retrieveMetadata(parentKey);
if (!parentMetadata.isDir()) {
// Invalid state: the parent path is actually a file. Throw.
throw new AzureException("File " + f + " has a parent directory "
+ parentPath + " which is also a file. Can't resolve.");
}
if (parentMetadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
if (LOG.isDebugEnabled()) {
LOG.debug("Found an implicit parent directory while trying to"
+ " delete the file " + f + ". Creating the directory blob for"
+ " it in " + parentKey + ".");
}
store.storeEmptyFolder(parentKey,
createPermissionStatus(FsPermission.getDefault()));
} else {
if (!skipParentFolderLastModifidedTimeUpdate) {
updateParentFolderLastModifiedTime(key);
}
}
}
store.delete(key);
instrumentation.fileDeleted();
} else {
// The path specifies a folder. Recursively delete all entries under the
// folder.
Path parentPath = absolutePath.getParent();
if (parentPath.getParent() != null) {
String parentKey = pathToKey(parentPath);
FileMetadata parentMetadata = store.retrieveMetadata(parentKey);
if (parentMetadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
if (LOG.isDebugEnabled()) {
LOG.debug("Found an implicit parent directory while trying to"
+ " delete the directory " + f
+ ". Creating the directory blob for" + " it in " + parentKey
+ ".");
}
store.storeEmptyFolder(parentKey,
createPermissionStatus(FsPermission.getDefault()));
}
}
// List all the blobs in the current folder.
String priorLastKey = null;
PartialListing listing = store.listAll(key, AZURE_LIST_ALL, 1,
priorLastKey);
FileMetadata[] contents = listing.getFiles();
if (!recursive && contents.length > 0) {
// The folder is non-empty and recursive delete was not specified.
// Throw an exception indicating that a non-recursive delete was
// specified for a non-empty folder.
throw new IOException("Non-recursive delete of non-empty directory "
+ f.toString());
}
// Delete all the files in the folder.
for (FileMetadata p : contents) {
        // Tag on the directory name found as the suffix of the
        // parent directory key to get the new absolute path.
String suffix = p.getKey().substring(
p.getKey().lastIndexOf(PATH_DELIMITER));
if (!p.isDir()) {
store.delete(key + suffix);
instrumentation.fileDeleted();
} else {
// Recursively delete contents of the sub-folders. Notice this also
// deletes the blob for the directory.
if (!delete(new Path(f.toString() + suffix), true)) {
return false;
}
}
}
store.delete(key);
// Update parent directory last modified time
Path parent = absolutePath.getParent();
if (parent != null && parent.getParent() != null) { // not root
if (!skipParentFolderLastModifidedTimeUpdate) {
updateParentFolderLastModifiedTime(key);
}
}
instrumentation.directoryDeleted();
}
// File or directory was successfully deleted.
return true;
}
@Override
public FileStatus getFileStatus(Path f) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Getting the file status for " + f.toString());
}
// Capture the absolute path and the path to key.
Path absolutePath = makeAbsolute(f);
String key = pathToKey(absolutePath);
if (key.length() == 0) { // root always exists
return newDirectory(null, absolutePath);
}
// The path is either a folder or a file. Retrieve metadata to
// determine if it is a directory or file.
FileMetadata meta = store.retrieveMetadata(key);
if (meta != null) {
if (meta.isDir()) {
// The path is a folder with files in it.
//
if (LOG.isDebugEnabled()) {
LOG.debug("Path " + f.toString() + "is a folder.");
}
// If a rename operation for the folder was pending, redo it.
// Then the file does not exist, so signal that.
if (conditionalRedoFolderRename(f)) {
throw new FileNotFoundException(
absolutePath + ": No such file or directory.");
}
// Return reference to the directory object.
return newDirectory(meta, absolutePath);
}
// The path is a file.
if (LOG.isDebugEnabled()) {
LOG.debug("Found the path: " + f.toString() + " as a file.");
}
// Return with reference to a file object.
return newFile(meta, absolutePath);
}
// File not found. Throw exception no such file or directory.
//
throw new FileNotFoundException(
absolutePath + ": No such file or directory.");
}
// Return true if there is a rename pending and we redo it, otherwise false.
private boolean conditionalRedoFolderRename(Path f) throws IOException {
// Can't rename /, so return immediately in that case.
if (f.getName().equals("")) {
return false;
}
// Check if there is a -RenamePending.json file for this folder, and if so,
// redo the rename.
Path absoluteRenamePendingFile = renamePendingFilePath(f);
if (exists(absoluteRenamePendingFile)) {
FolderRenamePending pending =
new FolderRenamePending(absoluteRenamePendingFile, this);
pending.redo();
return true;
} else {
return false;
}
}
// Return the path name that would be used for rename of folder with path f.
private Path renamePendingFilePath(Path f) {
Path absPath = makeAbsolute(f);
String key = pathToKey(absPath);
key += "-RenamePending.json";
return keyToPath(key);
}
@Override
public URI getUri() {
return uri;
}
/**
* Retrieve the status of a given path if it is a file, or of all the
* contained files if it is a directory.
*/
@Override
public FileStatus[] listStatus(Path f) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Listing status for " + f.toString());
}
Path absolutePath = makeAbsolute(f);
String key = pathToKey(absolutePath);
Set<FileStatus> status = new TreeSet<FileStatus>();
FileMetadata meta = store.retrieveMetadata(key);
if (meta != null) {
if (!meta.isDir()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Found path as a file");
}
return new FileStatus[] { newFile(meta, absolutePath) };
}
String partialKey = null;
PartialListing listing = store.list(key, AZURE_LIST_ALL, 1, partialKey);
// For any -RenamePending.json files in the listing,
// push the rename forward.
boolean renamed = conditionalRedoFolderRenames(listing);
// If any renames were redone, get another listing,
// since the current one may have changed due to the redo.
if (renamed) {
listing = store.list(key, AZURE_LIST_ALL, 1, partialKey);
}
for (FileMetadata fileMetadata : listing.getFiles()) {
Path subpath = keyToPath(fileMetadata.getKey());
// Test whether the metadata represents a file or directory and
// add the appropriate metadata object.
//
// Note: There was a very old bug here where directories were added
        // to the status set as files, flattening out recursive listings
// using "-lsr" down the file system hierarchy.
if (fileMetadata.isDir()) {
// Make sure we hide the temp upload folder
if (fileMetadata.getKey().equals(AZURE_TEMP_FOLDER)) {
// Don't expose that.
continue;
}
status.add(newDirectory(fileMetadata, subpath));
} else {
status.add(newFile(fileMetadata, subpath));
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("Found path as a directory with " + status.size()
+ " files in it.");
}
} else {
// There is no metadata found for the path.
if (LOG.isDebugEnabled()) {
LOG.debug("Did not find any metadata for path: " + key);
}
throw new FileNotFoundException("File" + f + " does not exist.");
}
return status.toArray(new FileStatus[0]);
}
// Redo any folder renames needed if there are rename pending files in the
// directory listing. Return true if one or more redo operations were done.
private boolean conditionalRedoFolderRenames(PartialListing listing)
throws IllegalArgumentException, IOException {
boolean renamed = false;
for (FileMetadata fileMetadata : listing.getFiles()) {
Path subpath = keyToPath(fileMetadata.getKey());
if (isRenamePendingFile(subpath)) {
FolderRenamePending pending =
new FolderRenamePending(subpath, this);
pending.redo();
renamed = true;
}
}
return renamed;
}
// True if this is a folder rename pending file, else false.
private boolean isRenamePendingFile(Path path) {
return path.toString().endsWith(FolderRenamePending.SUFFIX);
}
private FileStatus newFile(FileMetadata meta, Path path) {
return new FileStatus (
meta.getLength(),
false,
1,
blockSize,
meta.getLastModified(),
0,
meta.getPermissionStatus().getPermission(),
meta.getPermissionStatus().getUserName(),
meta.getPermissionStatus().getGroupName(),
path.makeQualified(getUri(), getWorkingDirectory()));
}
private FileStatus newDirectory(FileMetadata meta, Path path) {
return new FileStatus (
0,
true,
1,
blockSize,
meta == null ? 0 : meta.getLastModified(),
0,
meta == null ? FsPermission.getDefault() : meta.getPermissionStatus().getPermission(),
meta == null ? "" : meta.getPermissionStatus().getUserName(),
meta == null ? "" : meta.getPermissionStatus().getGroupName(),
path.makeQualified(getUri(), getWorkingDirectory()));
}
private static enum UMaskApplyMode {
NewFile,
NewDirectory,
NewDirectoryNoUmask,
ChangeExistingFile,
ChangeExistingDirectory,
}
/**
* Applies the applicable UMASK's on the given permission.
*
* @param permission
* The permission to mask.
* @param applyMode
* Whether to also apply the default umask.
   * @return The masked permission.
*/
private FsPermission applyUMask(final FsPermission permission,
final UMaskApplyMode applyMode) {
FsPermission newPermission = new FsPermission(permission);
// Apply the default umask - this applies for new files or directories.
if (applyMode == UMaskApplyMode.NewFile
|| applyMode == UMaskApplyMode.NewDirectory) {
newPermission = newPermission
.applyUMask(FsPermission.getUMask(getConf()));
}
return newPermission;
}
/**
* Creates the PermissionStatus object to use for the given permission, based
* on the current user in context.
*
* @param permission
* The permission for the file.
* @return The permission status object to use.
* @throws IOException
* If login fails in getCurrentUser
*/
private PermissionStatus createPermissionStatus(FsPermission permission)
throws IOException {
// Create the permission status for this file based on current user
return new PermissionStatus(
UserGroupInformation.getCurrentUser().getShortUserName(),
getConf().get(AZURE_DEFAULT_GROUP_PROPERTY_NAME,
AZURE_DEFAULT_GROUP_DEFAULT),
permission);
}
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
return mkdirs(f, permission, false);
}
public boolean mkdirs(Path f, FsPermission permission, boolean noUmask) throws IOException {
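    // When noUmask is true (as when create() materializes missing parent
    // folders with inherited permissions), the requested permission is used
    // as-is except that the owner write+execute bits are forced on.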
if (LOG.isDebugEnabled()) {
LOG.debug("Creating directory: " + f.toString());
}
if (containsColon(f)) {
throw new IOException("Cannot create directory " + f
+ " through WASB that has colons in the name");
}
Path absolutePath = makeAbsolute(f);
PermissionStatus permissionStatus = null;
if(noUmask) {
// ensure owner still has wx permissions at the minimum
permissionStatus = createPermissionStatus(
applyUMask(FsPermission.createImmutable((short) (permission.toShort() | USER_WX_PERMISION)),
UMaskApplyMode.NewDirectoryNoUmask));
} else {
permissionStatus = createPermissionStatus(
applyUMask(permission, UMaskApplyMode.NewDirectory));
}
ArrayList<String> keysToCreateAsFolder = new ArrayList<String>();
ArrayList<String> keysToUpdateAsFolder = new ArrayList<String>();
boolean childCreated = false;
// Check that there is no file in the parent chain of the given path.
for (Path current = absolutePath, parent = current.getParent();
parent != null; // Stop when you get to the root
current = parent, parent = current.getParent()) {
String currentKey = pathToKey(current);
FileMetadata currentMetadata = store.retrieveMetadata(currentKey);
if (currentMetadata != null && !currentMetadata.isDir()) {
throw new IOException("Cannot create directory " + f + " because " +
current + " is an existing file.");
} else if (currentMetadata == null) {
keysToCreateAsFolder.add(currentKey);
childCreated = true;
} else {
        // The directory already exists. Its last modified time needs to be
// updated if there is a child directory created under it.
if (childCreated) {
keysToUpdateAsFolder.add(currentKey);
}
childCreated = false;
}
}
for (String currentKey : keysToCreateAsFolder) {
store.storeEmptyFolder(currentKey, permissionStatus);
}
instrumentation.directoryCreated();
// otherwise throws exception
return true;
}
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Opening file: " + f.toString());
}
Path absolutePath = makeAbsolute(f);
String key = pathToKey(absolutePath);
FileMetadata meta = store.retrieveMetadata(key);
if (meta == null) {
throw new FileNotFoundException(f.toString());
}
if (meta.isDir()) {
throw new FileNotFoundException(f.toString()
+ " is a directory not a file.");
}
return new FSDataInputStream(new BufferedFSInputStream(
new NativeAzureFsInputStream(store.retrieve(key), key, meta.getLength()), bufferSize));
}
@Override
public boolean rename(Path src, Path dst) throws IOException {
FolderRenamePending renamePending = null;
if (LOG.isDebugEnabled()) {
LOG.debug("Moving " + src + " to " + dst);
}
if (containsColon(dst)) {
throw new IOException("Cannot rename to file " + dst
+ " through WASB that has colons in the name");
}
String srcKey = pathToKey(makeAbsolute(src));
if (srcKey.length() == 0) {
// Cannot rename root of file system
return false;
}
// Figure out the final destination
Path absoluteDst = makeAbsolute(dst);
String dstKey = pathToKey(absoluteDst);
FileMetadata dstMetadata = store.retrieveMetadata(dstKey);
if (dstMetadata != null && dstMetadata.isDir()) {
// It's an existing directory.
dstKey = pathToKey(makeAbsolute(new Path(dst, src.getName())));
if (LOG.isDebugEnabled()) {
LOG.debug("Destination " + dst
+ " is a directory, adjusted the destination to be " + dstKey);
}
} else if (dstMetadata != null) {
// Attempting to overwrite a file using rename()
if (LOG.isDebugEnabled()) {
LOG.debug("Destination " + dst
+ " is an already existing file, failing the rename.");
}
return false;
} else {
// Check that the parent directory exists.
FileMetadata parentOfDestMetadata =
store.retrieveMetadata(pathToKey(absoluteDst.getParent()));
if (parentOfDestMetadata == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Parent of the destination " + dst
+ " doesn't exist, failing the rename.");
}
return false;
} else if (!parentOfDestMetadata.isDir()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Parent of the destination " + dst
+ " is a file, failing the rename.");
}
return false;
}
}
FileMetadata srcMetadata = store.retrieveMetadata(srcKey);
if (srcMetadata == null) {
// Source doesn't exist
if (LOG.isDebugEnabled()) {
LOG.debug("Source " + src + " doesn't exist, failing the rename.");
}
return false;
} else if (!srcMetadata.isDir()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Source " + src + " found as a file, renaming.");
}
store.rename(srcKey, dstKey);
} else {
// Prepare for, execute and clean up after of all files in folder, and
// the root file, and update the last modified time of the source and
// target parent folders. The operation can be redone if it fails part
// way through, by applying the "Rename Pending" file.
// The following code (internally) only does atomic rename preparation
// and lease management for page blob folders, limiting the scope of the
// operation to HBase log file folders, where atomic rename is required.
// In the future, we could generalize it easily to all folders.
renamePending = prepareAtomicFolderRename(srcKey, dstKey);
renamePending.execute();
if (LOG.isDebugEnabled()) {
LOG.debug("Renamed " + src + " to " + dst + " successfully.");
}
renamePending.cleanup();
return true;
}
// Update the last-modified time of the parent folders of both source
// and destination.
updateParentFolderLastModifiedTime(srcKey);
updateParentFolderLastModifiedTime(dstKey);
if (LOG.isDebugEnabled()) {
LOG.debug("Renamed " + src + " to " + dst + " successfully.");
}
return true;
}
/**
* Update the last-modified time of the parent folder of the file
* identified by key.
* @param key
* @throws IOException
*/
private void updateParentFolderLastModifiedTime(String key)
throws IOException {
Path parent = makeAbsolute(keyToPath(key)).getParent();
if (parent != null && parent.getParent() != null) { // not root
String parentKey = pathToKey(parent);
// ensure the parent is a materialized folder
FileMetadata parentMetadata = store.retrieveMetadata(parentKey);
// The metadata could be null if the implicit folder only contains a
// single file. In this case, the parent folder no longer exists if the
// file is renamed; so we can safely ignore the null pointer case.
if (parentMetadata != null) {
if (parentMetadata.isDir()
&& parentMetadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
store.storeEmptyFolder(parentKey,
createPermissionStatus(FsPermission.getDefault()));
}
if (store.isAtomicRenameKey(parentKey)) {
SelfRenewingLease lease = null;
try {
lease = leaseSourceFolder(parentKey);
store.updateFolderLastModifiedTime(parentKey, lease);
} catch (AzureException e) {
String errorCode = "";
try {
StorageException e2 = (StorageException) e.getCause();
errorCode = e2.getErrorCode();
} catch (Exception e3) {
// do nothing if cast fails
}
if (errorCode.equals("BlobNotFound")) {
throw new FileNotFoundException("Folder does not exist: " + parentKey);
}
LOG.warn("Got unexpected exception trying to get lease on "
+ parentKey + ". " + e.getMessage());
throw e;
} finally {
try {
if (lease != null) {
lease.free();
}
} catch (Exception e) {
LOG.error("Unable to free lease on " + parentKey, e);
}
}
} else {
store.updateFolderLastModifiedTime(parentKey, null);
}
}
}
}
/**
* If the source is a page blob folder,
* prepare to rename this folder atomically. This means to get exclusive
* access to the source folder, and record the actions to be performed for
* this rename in a "Rename Pending" file. This code was designed to
* meet the needs of HBase, which requires atomic rename of write-ahead log
* (WAL) folders for correctness.
*
* Before calling this method, the caller must ensure that the source is a
* folder.
*
* For non-page-blob directories, prepare the in-memory information needed,
* but don't take the lease or write the redo file. This is done to limit the
* scope of atomic folder rename to HBase, at least at the time of writing
* this code.
*
* @param srcKey Source folder name.
* @param dstKey Destination folder name.
* @throws IOException
*/
private FolderRenamePending prepareAtomicFolderRename(
String srcKey, String dstKey) throws IOException {
if (store.isAtomicRenameKey(srcKey)) {
// Block unwanted concurrent access to source folder.
SelfRenewingLease lease = leaseSourceFolder(srcKey);
// Prepare in-memory information needed to do or redo a folder rename.
FolderRenamePending renamePending =
new FolderRenamePending(srcKey, dstKey, lease, this);
// Save it to persistent storage to help recover if the operation fails.
renamePending.writeFile(this);
return renamePending;
} else {
FolderRenamePending renamePending =
new FolderRenamePending(srcKey, dstKey, null, this);
return renamePending;
}
}
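  // Illustrative outline (not from the original source) of the redo protocol
  // prepared above for atomic-rename (page blob) folders:
  //
  //   1. Acquire a self-renewing lease on the source folder to block
  //      concurrent access.
  //   2. Write a "Rename Pending" file recording the source, destination and
  //      the files to be moved.
  //   3. FolderRenamePending.execute() performs the per-file renames; if the
  //      process crashes part way, a later reader of the pending file can
  //      redo the remaining renames.
  //   4. FolderRenamePending.cleanup() removes the pending file and frees
  //      the lease.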
/**
* Get a self-renewing Azure blob lease on the source folder zero-byte file.
*/
private SelfRenewingLease leaseSourceFolder(String srcKey)
throws AzureException {
return store.acquireLease(srcKey);
}
/**
* Return an array containing hostnames, offset and size of
* portions of the given file. For WASB we'll just lie and give
* fake hosts to make sure we get many splits in MR jobs.
*/
@Override
public BlockLocation[] getFileBlockLocations(FileStatus file,
long start, long len) throws IOException {
if (file == null) {
return null;
}
if ((start < 0) || (len < 0)) {
throw new IllegalArgumentException("Invalid start or len parameter");
}
if (file.getLen() < start) {
return new BlockLocation[0];
}
final String blobLocationHost = getConf().get(
AZURE_BLOCK_LOCATION_HOST_PROPERTY_NAME,
AZURE_BLOCK_LOCATION_HOST_DEFAULT);
final String[] name = { blobLocationHost };
final String[] host = { blobLocationHost };
long blockSize = file.getBlockSize();
if (blockSize <= 0) {
throw new IllegalArgumentException(
"The block size for the given file is not a positive number: "
+ blockSize);
}
int numberOfLocations = (int) (len / blockSize)
+ ((len % blockSize == 0) ? 0 : 1);
BlockLocation[] locations = new BlockLocation[numberOfLocations];
for (int i = 0; i < locations.length; i++) {
long currentOffset = start + (i * blockSize);
long currentLength = Math.min(blockSize, start + len - currentOffset);
locations[i] = new BlockLocation(name, host, currentOffset, currentLength);
}
return locations;
}
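  // The number of synthetic block locations returned above is simply
  // ceil(len / blockSize). The illustrative helper below (not referenced by
  // the file system code) isolates that arithmetic: for example, len = 250 MB
  // with a 64 MB block size yields 4 locations.
  private static int illustrateNumberOfLocations(long len, long blockSize) {
    return (int) (len / blockSize) + ((len % blockSize == 0) ? 0 : 1);
  }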
/**
* Set the working directory to the given directory.
*/
@Override
public void setWorkingDirectory(Path newDir) {
workingDir = makeAbsolute(newDir);
}
@Override
public Path getWorkingDirectory() {
return workingDir;
}
@Override
public void setPermission(Path p, FsPermission permission) throws IOException {
Path absolutePath = makeAbsolute(p);
String key = pathToKey(absolutePath);
FileMetadata metadata = store.retrieveMetadata(key);
if (metadata == null) {
throw new FileNotFoundException("File doesn't exist: " + p);
}
permission = applyUMask(permission,
metadata.isDir() ? UMaskApplyMode.ChangeExistingDirectory
: UMaskApplyMode.ChangeExistingFile);
if (metadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
// It's an implicit folder, need to materialize it.
store.storeEmptyFolder(key, createPermissionStatus(permission));
} else if (!metadata.getPermissionStatus().getPermission().
equals(permission)) {
store.changePermissionStatus(key, new PermissionStatus(
metadata.getPermissionStatus().getUserName(),
metadata.getPermissionStatus().getGroupName(),
permission));
}
}
@Override
public void setOwner(Path p, String username, String groupname)
throws IOException {
Path absolutePath = makeAbsolute(p);
String key = pathToKey(absolutePath);
FileMetadata metadata = store.retrieveMetadata(key);
if (metadata == null) {
throw new FileNotFoundException("File doesn't exist: " + p);
}
PermissionStatus newPermissionStatus = new PermissionStatus(
username == null ?
metadata.getPermissionStatus().getUserName() : username,
groupname == null ?
metadata.getPermissionStatus().getGroupName() : groupname,
metadata.getPermissionStatus().getPermission());
if (metadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
// It's an implicit folder, need to materialize it.
store.storeEmptyFolder(key, newPermissionStatus);
} else {
store.changePermissionStatus(key, newPermissionStatus);
}
}
@Override
public synchronized void close() throws IOException {
if (isClosed) {
return;
}
// Call the base close() to close any resources there.
super.close();
// Close the store to close any resources there - e.g. the bandwidth
// updater thread would be stopped at this time.
store.close();
// Notify the metrics system that this file system is closed, which may
// trigger one final metrics push to get the accurate final file system
// metrics out.
long startTime = System.currentTimeMillis();
if(!getConf().getBoolean(SKIP_AZURE_METRICS_PROPERTY_NAME, false)) {
AzureFileSystemMetricsSystem.unregisterSource(metricsSourceName);
AzureFileSystemMetricsSystem.fileSystemClosed();
}
if (LOG.isDebugEnabled()) {
LOG.debug("Submitting metrics when file system closed took "
+ (System.currentTimeMillis() - startTime) + " ms.");
}
isClosed = true;
}
/**
* A handler that defines what to do with blobs whose upload was
* interrupted.
*/
private abstract class DanglingFileHandler {
abstract void handleFile(FileMetadata file, FileMetadata tempFile)
throws IOException;
}
/**
* Handler implementation for just deleting dangling files and cleaning
* them up.
*/
private class DanglingFileDeleter extends DanglingFileHandler {
@Override
void handleFile(FileMetadata file, FileMetadata tempFile)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Deleting dangling file " + file.getKey());
}
store.delete(file.getKey());
store.delete(tempFile.getKey());
}
}
/**
* Handler implementation for just moving dangling files to recovery
* location (/lost+found).
*/
private class DanglingFileRecoverer extends DanglingFileHandler {
private final Path destination;
DanglingFileRecoverer(Path destination) {
this.destination = destination;
}
@Override
void handleFile(FileMetadata file, FileMetadata tempFile)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Recovering " + file.getKey());
}
// Move to the final destination
String finalDestinationKey =
pathToKey(new Path(destination, file.getKey()));
store.rename(tempFile.getKey(), finalDestinationKey);
if (!finalDestinationKey.equals(file.getKey())) {
// Delete the empty link file now that we've restored it.
store.delete(file.getKey());
}
}
}
/**
* Check if a path has colons in its name
*/
private boolean containsColon(Path p) {
return p.toUri().getPath().toString().contains(":");
}
/**
* Implements recover and delete (-move and -delete) behaviors for handling
* dangling files (blobs whose upload was interrupted).
*
* @param root
* The root path to check from.
* @param handler
* The handler that deals with dangling files.
*/
private void handleFilesWithDanglingTempData(Path root,
DanglingFileHandler handler) throws IOException {
// Calculate the cut-off for when to consider a blob to be dangling.
long cutoffForDangling = new Date().getTime()
- getConf().getInt(AZURE_TEMP_EXPIRY_PROPERTY_NAME,
AZURE_TEMP_EXPIRY_DEFAULT) * 1000;
// Go over all the blobs under the given root and look for blobs to
// recover.
String priorLastKey = null;
do {
PartialListing listing = store.listAll(pathToKey(root), AZURE_LIST_ALL,
AZURE_UNBOUNDED_DEPTH, priorLastKey);
for (FileMetadata file : listing.getFiles()) {
if (!file.isDir()) { // We don't recover directory blobs
// See if this blob has a link in it (meaning it's a place-holder
// blob for when the upload to the temp blob is complete).
String link = store.getLinkInFileMetadata(file.getKey());
if (link != null) {
// It has a link, see if the temp blob it is pointing to is
// existent and old enough to be considered dangling.
FileMetadata linkMetadata = store.retrieveMetadata(link);
if (linkMetadata != null
&& linkMetadata.getLastModified() >= cutoffForDangling) {
// Found one!
handler.handleFile(file, linkMetadata);
}
}
}
}
priorLastKey = listing.getPriorLastKey();
} while (priorLastKey != null);
}
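  // Illustrative helper (not referenced by the production code above): the
  // dangling-data cutoff is "now minus the configured expiry", converted from
  // seconds to milliseconds. A temp blob last modified before this cutoff is
  // a candidate for recovery or deletion.
  private static long illustrateDanglingCutoff(long nowMillis, int expirySeconds) {
    return nowMillis - expirySeconds * 1000L;
  }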
/**
   * Looks under the given root path for any blobs that are left "dangling",
   * meaning that they are place-holder blobs that we created while uploading
* the data to a temporary blob, but for some reason we crashed in the middle
* of the upload and left them there. If any are found, we move them to the
* destination given.
*
* @param root
* The root path to consider.
* @param destination
* The destination path to move any recovered files to.
* @throws IOException
*/
public void recoverFilesWithDanglingTempData(Path root, Path destination)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Recovering files with dangling temp data in " + root);
}
handleFilesWithDanglingTempData(root,
new DanglingFileRecoverer(destination));
}
/**
   * Looks under the given root path for any blobs that are left "dangling",
   * meaning that they are place-holder blobs that we created while uploading
* the data to a temporary blob, but for some reason we crashed in the middle
* of the upload and left them there. If any are found, we delete them.
*
* @param root
* The root path to consider.
* @throws IOException
*/
public void deleteFilesWithDanglingTempData(Path root) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Deleting files with dangling temp data in " + root);
}
handleFilesWithDanglingTempData(root, new DanglingFileDeleter());
}
@Override
protected void finalize() throws Throwable {
LOG.debug("finalize() called.");
close();
super.finalize();
}
/**
* Encode the key with a random prefix for load balancing in Azure storage.
* Upload data to a random temporary file then do storage side renaming to
* recover the original key.
*
   * @param aKey The original blob key to randomize.
* @return Encoded version of the original key.
*/
private static String encodeKey(String aKey) {
// Get the tail end of the key name.
//
String fileName = aKey.substring(aKey.lastIndexOf(Path.SEPARATOR) + 1,
aKey.length());
// Construct the randomized prefix of the file name. The prefix ensures the
// file always drops into the same folder but with a varying tail key name.
String filePrefix = AZURE_TEMP_FOLDER + Path.SEPARATOR
+ UUID.randomUUID().toString();
// Concatenate the randomized prefix with the tail of the key name.
String randomizedKey = filePrefix + fileName;
// Return to the caller with the randomized key.
return randomizedKey;
}
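  // Illustrative helper (not referenced by the production code above) showing
  // the shape of an encoded key, with the random UUID injected as a parameter
  // so the structure is visible: for example, "a/b/c.txt" becomes
  // "<temp folder>/<uuid>c.txt".
  private static String illustrateEncodedKeyShape(String aKey, String uuid) {
    String fileName = aKey.substring(aKey.lastIndexOf(Path.SEPARATOR) + 1);
    return AZURE_TEMP_FOLDER + Path.SEPARATOR + uuid + fileName;
  }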
}
| 85,315 | 34.080592 | 118 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbFsck.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.annotations.VisibleForTesting;
/**
* An fsck tool implementation for WASB that does various admin/cleanup/recovery
* tasks on the WASB file system.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class WasbFsck extends Configured implements Tool {
private FileSystem mockFileSystemForTesting = null;
private static final String LOST_AND_FOUND_PATH = "/lost+found";
private boolean pathNameWarning = false;
public WasbFsck(Configuration conf) {
super(conf);
}
/**
* For testing purposes, set the file system to use here instead of relying on
* getting it from the FileSystem class based on the URI.
*
* @param fileSystem
* The file system to use.
*/
@VisibleForTesting
public void setMockFileSystemForTesting(FileSystem fileSystem) {
this.mockFileSystemForTesting = fileSystem;
}
@Override
public int run(String[] args) throws Exception {
if (doPrintUsage(Arrays.asList(args))) {
printUsage();
return -1;
}
Path pathToCheck = null;
boolean doRecover = false;
boolean doDelete = false;
for (String arg : args) {
if (!arg.startsWith("-")) {
if (pathToCheck != null) {
System.err
.println("Can't specify multiple paths to check on the command-line");
return 1;
}
pathToCheck = new Path(arg);
} else if (arg.equals("-move")) {
doRecover = true;
} else if (arg.equals("-delete")) {
doDelete = true;
}
}
if (doRecover && doDelete) {
System.err
.println("Conflicting options: can't specify both -move and -delete.");
return 1;
}
if (pathToCheck == null) {
pathToCheck = new Path("/"); // Check everything.
}
FileSystem fs;
if (mockFileSystemForTesting == null) {
fs = FileSystem.get(pathToCheck.toUri(), getConf());
} else {
fs = mockFileSystemForTesting;
}
if (!recursiveCheckChildPathName(fs, fs.makeQualified(pathToCheck))) {
pathNameWarning = true;
}
if (!(fs instanceof NativeAzureFileSystem)) {
System.err
.println("Can only check WASB file system. Instead I'm asked to"
+ " check: " + fs.getUri());
return 2;
}
NativeAzureFileSystem wasbFs = (NativeAzureFileSystem) fs;
if (doRecover) {
System.out.println("Recovering files with dangling data under: "
+ pathToCheck);
wasbFs.recoverFilesWithDanglingTempData(pathToCheck, new Path(
LOST_AND_FOUND_PATH));
} else if (doDelete) {
System.out.println("Deleting temp files with dangling data under: "
+ pathToCheck);
wasbFs.deleteFilesWithDanglingTempData(pathToCheck);
} else {
System.out.println("Please specify -move or -delete");
}
return 0;
}
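  // Illustrative invocations (not from the original source); the paths are
  // hypothetical:
  //
  //   hadoop org.apache.hadoop.fs.azure.WasbFsck -move /hbase
  //     moves files with dangling temp data under /hbase to /lost+found
  //   hadoop org.apache.hadoop.fs.azure.WasbFsck -delete /tmp/staging
  //     deletes files with dangling temp data under /tmp/staging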
public boolean getPathNameWarning() {
return pathNameWarning;
}
/**
* Recursively check if a given path and its child paths have colons in their
* names. It returns true if none of them has a colon or this path does not
* exist, and false otherwise.
*/
private boolean recursiveCheckChildPathName(FileSystem fs, Path p)
throws IOException {
if (p == null) {
return true;
}
if (!fs.exists(p)) {
System.out.println("Path " + p + " does not exist!");
return true;
}
if (fs.isFile(p)) {
if (containsColon(p)) {
System.out.println("Warning: file " + p + " has a colon in its name.");
return false;
} else {
return true;
}
} else {
boolean flag;
if (containsColon(p)) {
System.out.println("Warning: directory " + p
+ " has a colon in its name.");
flag = false;
} else {
flag = true;
}
FileStatus[] listed = fs.listStatus(p);
for (FileStatus l : listed) {
if (!recursiveCheckChildPathName(fs, l.getPath())) {
flag = false;
}
}
return flag;
}
}
private boolean containsColon(Path p) {
return p.toUri().getPath().toString().contains(":");
}
private static void printUsage() {
System.out.println("Usage: WasbFSck [<path>] [-move | -delete]");
System.out.println("\t<path>\tstart checking from this path");
System.out.println("\t-move\tmove any files whose upload was interrupted"
+ " mid-stream to " + LOST_AND_FOUND_PATH);
System.out
.println("\t-delete\tdelete any files whose upload was interrupted"
+ " mid-stream");
ToolRunner.printGenericCommandUsage(System.out);
}
private boolean doPrintUsage(List<String> args) {
return args.contains("-H");
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new WasbFsck(new Configuration()), args);
System.exit(res);
}
}
| 6,181 | 30.380711 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/FileMetadata.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
/**
* <p>
* Holds basic metadata for a file stored in a {@link NativeFileSystemStore}.
* </p>
*/
@InterfaceAudience.Private
class FileMetadata {
private final String key;
private final long length;
private final long lastModified;
private final boolean isDir;
private final PermissionStatus permissionStatus;
private final BlobMaterialization blobMaterialization;
/**
* Constructs a FileMetadata object for a file.
*
* @param key
* The key (path) to the file.
* @param length
* The length in bytes of the file.
* @param lastModified
* The last modified date (milliseconds since January 1, 1970 UTC.)
* @param permissionStatus
* The permission for the file.
*/
public FileMetadata(String key, long length, long lastModified,
PermissionStatus permissionStatus) {
this.key = key;
this.length = length;
this.lastModified = lastModified;
this.isDir = false;
this.permissionStatus = permissionStatus;
    this.blobMaterialization = BlobMaterialization.Explicit; // Files are never
// implicit.
}
/**
* Constructs a FileMetadata object for a directory.
*
* @param key
* The key (path) to the directory.
* @param lastModified
* The last modified date (milliseconds since January 1, 1970 UTC.)
* @param permissionStatus
* The permission for the directory.
* @param blobMaterialization
* Whether this is an implicit (no real blob backing it) or explicit
* directory.
*/
public FileMetadata(String key, long lastModified,
PermissionStatus permissionStatus, BlobMaterialization blobMaterialization) {
this.key = key;
this.isDir = true;
this.length = 0;
this.lastModified = lastModified;
this.permissionStatus = permissionStatus;
this.blobMaterialization = blobMaterialization;
}
public boolean isDir() {
return isDir;
}
public String getKey() {
return key;
}
public long getLength() {
return length;
}
public long getLastModified() {
return lastModified;
}
public PermissionStatus getPermissionStatus() {
return permissionStatus;
}
/**
* Indicates whether this is an implicit directory (no real blob backing it)
* or an explicit one.
*
* @return Implicit if this is an implicit directory, or Explicit if it's an
* explicit directory or a file.
*/
public BlobMaterialization getBlobMaterialization() {
return blobMaterialization;
}
@Override
public String toString() {
return "FileMetadata[" + key + ", " + length + ", " + lastModified + ", "
+ permissionStatus + "]";
}
}
| 3,722 | 29.768595 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SyncableDataOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.fs.Syncable;
/**
* Support the Syncable interface on top of a DataOutputStream.
* This allows passing the sync/hflush/hsync calls through to the
* wrapped stream passed in to the constructor. This is required
* for HBase when wrapping a PageBlobOutputStream used as a write-ahead log.
*/
public class SyncableDataOutputStream extends DataOutputStream implements Syncable {
public SyncableDataOutputStream(OutputStream out) {
super(out);
}
@Override
@Deprecated
public void sync() throws IOException {
hflush();
}
@Override
public void hflush() throws IOException {
if (out instanceof Syncable) {
((Syncable) out).hflush();
} else {
out.flush();
}
}
@Override
public void hsync() throws IOException {
if (out instanceof Syncable) {
((Syncable) out).hsync();
} else {
out.flush();
}
}
}
| 1,836 | 28.15873 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.apache.hadoop.fs.azure.PageBlobFormatHelpers.PAGE_DATA_SIZE;
import static org.apache.hadoop.fs.azure.PageBlobFormatHelpers.PAGE_HEADER_SIZE;
import static org.apache.hadoop.fs.azure.PageBlobFormatHelpers.PAGE_SIZE;
import static org.apache.hadoop.fs.azure.PageBlobFormatHelpers.toShort;
import static org.apache.hadoop.fs.azure.PageBlobFormatHelpers.withMD5Checking;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.BlobRequestOptions;
import com.microsoft.azure.storage.blob.PageRange;
/**
* An input stream that reads file data from a page blob stored
* using ASV's custom format.
*/
final class PageBlobInputStream extends InputStream {
private static final Log LOG = LogFactory.getLog(PageBlobInputStream.class);
// The blob we're reading from.
private final CloudPageBlobWrapper blob;
// The operation context to use for storage requests.
private final OperationContext opContext;
// The number of pages remaining to be read from the server.
private long numberOfPagesRemaining;
// The current byte offset to start reading from the server next,
// equivalent to (total number of pages we've read) * (page size).
private long currentOffsetInBlob;
// The buffer holding the current data we last read from the server.
private byte[] currentBuffer;
// The current byte offset we're at in the buffer.
private int currentOffsetInBuffer;
// Maximum number of pages to get per any one request.
private static final int MAX_PAGES_PER_DOWNLOAD =
4 * 1024 * 1024 / PAGE_SIZE;
// Whether the stream has been closed.
private boolean closed = false;
// Total stream size, or -1 if not initialized.
long pageBlobSize = -1;
// Current position in stream of valid data.
long filePosition = 0;
/**
* Helper method to extract the actual data size of a page blob.
* This typically involves 2 service requests (one for page ranges, another
* for the last page's data).
*
* @param blob The blob to get the size from.
* @param opContext The operation context to use for the requests.
* @return The total data size of the blob in bytes.
* @throws IOException If the format is corrupt.
* @throws StorageException If anything goes wrong in the requests.
*/
public static long getPageBlobDataSize(CloudPageBlobWrapper blob,
OperationContext opContext) throws IOException, StorageException {
// Get the page ranges for the blob. There should be one range starting
// at byte 0, but we tolerate (and ignore) ranges after the first one.
ArrayList<PageRange> pageRanges =
blob.downloadPageRanges(new BlobRequestOptions(), opContext);
if (pageRanges.size() == 0) {
return 0;
}
if (pageRanges.get(0).getStartOffset() != 0) {
// Not expected: we always upload our page blobs as a contiguous range
// starting at byte 0.
throw badStartRangeException(blob, pageRanges.get(0));
}
long totalRawBlobSize = pageRanges.get(0).getEndOffset() + 1;
// Get the last page.
long lastPageStart = totalRawBlobSize - PAGE_SIZE;
ByteArrayOutputStream baos =
new ByteArrayOutputStream(PageBlobFormatHelpers.PAGE_SIZE);
blob.downloadRange(lastPageStart, PAGE_SIZE, baos,
new BlobRequestOptions(), opContext);
byte[] lastPage = baos.toByteArray();
short lastPageSize = getPageSize(blob, lastPage, 0);
long totalNumberOfPages = totalRawBlobSize / PAGE_SIZE;
return (totalNumberOfPages - 1) * PAGE_DATA_SIZE + lastPageSize;
}
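  // Illustrative helper (not referenced by the production code above)
  // isolating the size arithmetic used in getPageBlobDataSize(): every page
  // except the last contributes PAGE_DATA_SIZE bytes of data, and the last
  // page contributes whatever its header records.
  private static long illustrateDataSize(long totalRawBlobSize, short lastPageSize) {
    long totalNumberOfPages = totalRawBlobSize / PAGE_SIZE;
    return (totalNumberOfPages - 1) * PAGE_DATA_SIZE + lastPageSize;
  }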
/**
* Constructs a stream over the given page blob.
*/
public PageBlobInputStream(CloudPageBlobWrapper blob,
OperationContext opContext)
throws IOException {
this.blob = blob;
this.opContext = opContext;
ArrayList<PageRange> allRanges;
try {
allRanges =
blob.downloadPageRanges(new BlobRequestOptions(), opContext);
} catch (StorageException e) {
throw new IOException(e);
}
if (allRanges.size() > 0) {
if (allRanges.get(0).getStartOffset() != 0) {
throw badStartRangeException(blob, allRanges.get(0));
}
if (allRanges.size() > 1) {
LOG.warn(String.format(
"Blob %s has %d page ranges beyond the first range. "
+ "Only reading the first range.",
blob.getUri(), allRanges.size() - 1));
}
numberOfPagesRemaining =
(allRanges.get(0).getEndOffset() + 1) / PAGE_SIZE;
} else {
numberOfPagesRemaining = 0;
}
}
/** Return the size of the remaining available bytes
* if the size is less than or equal to {@link Integer#MAX_VALUE},
* otherwise, return {@link Integer#MAX_VALUE}.
*
* This is to match the behavior of DFSInputStream.available(),
* which some clients may rely on (HBase write-ahead log reading in
* particular).
*/
@Override
public synchronized int available() throws IOException {
if (closed) {
throw new IOException("Stream closed");
}
if (pageBlobSize == -1) {
try {
pageBlobSize = getPageBlobDataSize(blob, opContext);
} catch (StorageException e) {
throw new IOException("Unable to get page blob size.", e);
}
}
final long remaining = pageBlobSize - filePosition;
return remaining <= Integer.MAX_VALUE ?
(int) remaining : Integer.MAX_VALUE;
}
@Override
public synchronized void close() throws IOException {
closed = true;
}
private boolean dataAvailableInBuffer() {
return currentBuffer != null
&& currentOffsetInBuffer < currentBuffer.length;
}
/**
* Check our buffer and download more from the server if needed.
   * If no data is available in the buffer, this method downloads at most the
   * maximum page blob download size (4 MB), or all remaining pages if fewer
   * than 4 MB are left.
   * If we are on the last page, the method will return true even if
   * we have reached the end of the stream.
* @return true if there's more data in the buffer, false if buffer is empty
* and we reached the end of the blob.
* @throws IOException
*/
private synchronized boolean ensureDataInBuffer() throws IOException {
if (dataAvailableInBuffer()) {
// We still have some data in our buffer.
return true;
}
currentBuffer = null;
if (numberOfPagesRemaining == 0) {
// No more data to read.
return false;
}
final long pagesToRead = Math.min(MAX_PAGES_PER_DOWNLOAD,
numberOfPagesRemaining);
final int bufferSize = (int) (pagesToRead * PAGE_SIZE);
// Download page to current buffer.
try {
// Create a byte array output stream to capture the results of the
// download.
ByteArrayOutputStream baos = new ByteArrayOutputStream(bufferSize);
blob.downloadRange(currentOffsetInBlob, bufferSize, baos,
withMD5Checking(), opContext);
currentBuffer = baos.toByteArray();
} catch (StorageException e) {
throw new IOException(e);
}
numberOfPagesRemaining -= pagesToRead;
currentOffsetInBlob += bufferSize;
currentOffsetInBuffer = PAGE_HEADER_SIZE;
// Since we just downloaded a new buffer, validate its consistency.
validateCurrentBufferConsistency();
return true;
}
private void validateCurrentBufferConsistency()
throws IOException {
if (currentBuffer.length % PAGE_SIZE != 0) {
throw new AssertionError("Unexpected buffer size: "
+ currentBuffer.length);
}
int numberOfPages = currentBuffer.length / PAGE_SIZE;
for (int page = 0; page < numberOfPages; page++) {
short currentPageSize = getPageSize(blob, currentBuffer,
page * PAGE_SIZE);
// Calculate the number of pages that exist after this one
// in the blob.
long totalPagesAfterCurrent =
(numberOfPages - page - 1) + numberOfPagesRemaining;
// Only the last page is allowed to be not filled completely.
if (currentPageSize < PAGE_DATA_SIZE
&& totalPagesAfterCurrent > 0) {
throw fileCorruptException(blob, String.format(
"Page with partial data found in the middle (%d pages from the"
+ " end) that only has %d bytes of data.",
totalPagesAfterCurrent, currentPageSize));
}
}
}
// Reads the page size from the page header at the given offset.
private static short getPageSize(CloudPageBlobWrapper blob,
byte[] data, int offset) throws IOException {
short pageSize = toShort(data[offset], data[offset + 1]);
if (pageSize < 0 || pageSize > PAGE_DATA_SIZE) {
throw fileCorruptException(blob, String.format(
"Unexpected page size in the header: %d.",
pageSize));
}
return pageSize;
}
@Override
public synchronized int read(byte[] outputBuffer, int offset, int len)
throws IOException {
// If len is zero return 0 per the InputStream contract
if (len == 0) {
return 0;
}
int numberOfBytesRead = 0;
while (len > 0) {
if (!ensureDataInBuffer()) {
break;
}
int bytesRemainingInCurrentPage = getBytesRemainingInCurrentPage();
int numBytesToRead = Math.min(len, bytesRemainingInCurrentPage);
System.arraycopy(currentBuffer, currentOffsetInBuffer, outputBuffer,
offset, numBytesToRead);
numberOfBytesRead += numBytesToRead;
offset += numBytesToRead;
len -= numBytesToRead;
if (numBytesToRead == bytesRemainingInCurrentPage) {
// We've finished this page, move on to the next.
advancePagesInBuffer(1);
} else {
currentOffsetInBuffer += numBytesToRead;
}
}
// if outputBuffer len is > 0 and zero bytes were read, we reached
// an EOF
if (numberOfBytesRead == 0) {
return -1;
}
filePosition += numberOfBytesRead;
return numberOfBytesRead;
}
@Override
public int read() throws IOException {
byte[] oneByte = new byte[1];
int result = read(oneByte);
if (result < 0) {
return result;
}
return oneByte[0];
}
/**
* Skips over and discards n bytes of data from this input stream.
* @param n the number of bytes to be skipped.
* @return the actual number of bytes skipped.
*/
@Override
public synchronized long skip(long n) throws IOException {
long skipped = skipImpl(n);
filePosition += skipped; // track the position in the stream
return skipped;
}
private long skipImpl(long n) throws IOException {
if (n == 0) {
return 0;
}
// First skip within the current buffer as much as possible.
long skippedWithinBuffer = skipWithinBuffer(n);
if (skippedWithinBuffer > n) {
// TO CONSIDER: Using a contracts framework such as Google's cofoja for
// these post-conditions.
throw new AssertionError(String.format(
"Bug in skipWithinBuffer: it skipped over %d bytes when asked to "
+ "skip %d bytes.", skippedWithinBuffer, n));
}
n -= skippedWithinBuffer;
long skipped = skippedWithinBuffer;
// Empty the current buffer, we're going beyond it.
currentBuffer = null;
// Skip over whole pages as necessary without retrieving them from the
// server.
long pagesToSkipOver = Math.min(
n / PAGE_DATA_SIZE,
numberOfPagesRemaining - 1);
numberOfPagesRemaining -= pagesToSkipOver;
currentOffsetInBlob += pagesToSkipOver * PAGE_SIZE;
skipped += pagesToSkipOver * PAGE_DATA_SIZE;
n -= pagesToSkipOver * PAGE_DATA_SIZE;
if (n == 0) {
return skipped;
}
// Now read in at the current position, and skip within current buffer.
if (!ensureDataInBuffer()) {
return skipped;
}
return skipped + skipWithinBuffer(n);
}
/**
   * Skip over n bytes within the current buffer, or skip over the whole
   * buffer if n is greater than the bytes remaining in the buffer.
* @param n The number of data bytes to skip.
* @return The number of bytes actually skipped.
* @throws IOException if data corruption found in the buffer.
*/
private long skipWithinBuffer(long n) throws IOException {
if (!dataAvailableInBuffer()) {
return 0;
}
long skipped = 0;
// First skip within the current page.
skipped = skipWithinCurrentPage(n);
if (skipped > n) {
throw new AssertionError(String.format(
"Bug in skipWithinCurrentPage: it skipped over %d bytes when asked"
+ " to skip %d bytes.", skipped, n));
}
n -= skipped;
if (n == 0 || !dataAvailableInBuffer()) {
return skipped;
}
// Calculate how many whole pages (pages before the possibly partially
// filled last page) remain.
int currentPageIndex = currentOffsetInBuffer / PAGE_SIZE;
int numberOfPagesInBuffer = currentBuffer.length / PAGE_SIZE;
int wholePagesRemaining = numberOfPagesInBuffer - currentPageIndex - 1;
if (n < (PAGE_DATA_SIZE * wholePagesRemaining)) {
// I'm within one of the whole pages remaining, skip in there.
advancePagesInBuffer((int) (n / PAGE_DATA_SIZE));
currentOffsetInBuffer += n % PAGE_DATA_SIZE;
return n + skipped;
}
// Skip over the whole pages.
advancePagesInBuffer(wholePagesRemaining);
skipped += wholePagesRemaining * PAGE_DATA_SIZE;
n -= wholePagesRemaining * PAGE_DATA_SIZE;
// At this point we know we need to skip to somewhere in the last page,
// or just go to the end.
return skipWithinCurrentPage(n) + skipped;
}
/**
   * Skip over n bytes within the current page, or skip over the whole
   * page if n is greater than the bytes remaining in the page.
* @param n The number of data bytes to skip.
* @return The number of bytes actually skipped.
* @throws IOException if data corruption found in the buffer.
*/
private long skipWithinCurrentPage(long n) throws IOException {
int remainingBytesInCurrentPage = getBytesRemainingInCurrentPage();
if (n < remainingBytesInCurrentPage) {
currentOffsetInBuffer += n;
return n;
} else {
advancePagesInBuffer(1);
return remainingBytesInCurrentPage;
}
}
/**
* Gets the number of bytes remaining within the current page in the buffer.
* @return The number of bytes remaining.
* @throws IOException if data corruption found in the buffer.
*/
private int getBytesRemainingInCurrentPage() throws IOException {
if (!dataAvailableInBuffer()) {
return 0;
}
// Calculate our current position relative to the start of the current
// page.
int currentDataOffsetInPage =
(currentOffsetInBuffer % PAGE_SIZE) - PAGE_HEADER_SIZE;
int pageBoundary = getCurrentPageStartInBuffer();
// Get the data size of the current page from the header.
short sizeOfCurrentPage = getPageSize(blob, currentBuffer, pageBoundary);
return sizeOfCurrentPage - currentDataOffsetInPage;
}
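  // Layout assumed by the arithmetic above: each PAGE_SIZE-byte page begins
  // with a PAGE_HEADER_SIZE-byte header whose first two bytes record how much
  // of the page's PAGE_DATA_SIZE data region is in use (see getPageSize()).
  // The illustrative helper below (not referenced elsewhere) isolates the
  // offset-within-page computation.
  private static int illustrateDataOffsetInPage(int offsetInBuffer) {
    return (offsetInBuffer % PAGE_SIZE) - PAGE_HEADER_SIZE;
  }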
private static IOException badStartRangeException(CloudPageBlobWrapper blob,
PageRange startRange) {
return fileCorruptException(blob, String.format(
"Page blobs for ASV should always use a page range starting at byte 0. "
+ "This starts at byte %d.",
startRange.getStartOffset()));
}
private void advancePagesInBuffer(int numberOfPages) {
currentOffsetInBuffer =
getCurrentPageStartInBuffer()
+ (numberOfPages * PAGE_SIZE)
+ PAGE_HEADER_SIZE;
}
private int getCurrentPageStartInBuffer() {
return PAGE_SIZE * (currentOffsetInBuffer / PAGE_SIZE);
}
private static IOException fileCorruptException(CloudPageBlobWrapper blob,
String reason) {
return new IOException(String.format(
"The page blob: '%s' is corrupt or has an unexpected format: %s.",
blob.getUri(), reason));
}
}
| 17,018 | 34.905063 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import java.net.HttpURLConnection;
import java.security.InvalidKeyException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import com.microsoft.azure.storage.Constants.HeaderConstants;
import com.microsoft.azure.storage.core.StorageCredentialsHelper;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.SendingRequestEvent;
import com.microsoft.azure.storage.StorageCredentials;
import com.microsoft.azure.storage.StorageEvent;
import com.microsoft.azure.storage.StorageException;
/**
* Manages the lifetime of binding on the operation contexts to intercept send
* request events to Azure storage.
*/
@InterfaceAudience.Private
public final class SendRequestIntercept extends StorageEvent<SendingRequestEvent> {
public static final Log LOG = LogFactory.getLog(SendRequestIntercept.class);
private static final String ALLOW_ALL_REQUEST_PRECONDITIONS = "*";
private final StorageCredentials storageCreds;
private final boolean allowConcurrentOOBIo;
private final OperationContext opContext;
/**
* Getter returning the storage account credentials.
*
* @return storageCreds - account storage credentials.
*/
private StorageCredentials getCredentials() {
return storageCreds;
}
/**
* Query if out-of-band I/Os are allowed.
*
   * @return allowConcurrentOOBIo - true if OOB I/O is allowed, and false
   * otherwise.
*/
private boolean isOutOfBandIoAllowed() {
return allowConcurrentOOBIo;
}
/**
* Getter returning the operation context.
*
   * @return opContext - the operation context bound to this interceptor.
*/
private OperationContext getOperationContext() {
return opContext;
}
/**
   * Constructor for SendRequestIntercept.
   *
   * @param storageCreds
   *          - storage account credentials for signing packets.
   * @param allowConcurrentOOBIo
   *          - true if reads are allowed with concurrent out-of-band writes.
   * @param opContext
   *          - the operation context to bind to.
   *
*/
private SendRequestIntercept(StorageCredentials storageCreds,
boolean allowConcurrentOOBIo, OperationContext opContext) {
// Capture the send delay callback interface.
this.storageCreds = storageCreds;
this.allowConcurrentOOBIo = allowConcurrentOOBIo;
this.opContext = opContext;
}
/**
   * Binds a new listener to the operation context so the WASB file system can
   * appropriately intercept sends. By allowing concurrent OOB I/Os, we bypass
   * the blob immutability check when reading streams.
   *
   * @param storageCreds
   *          The storage account credentials used to re-sign intercepted
   *          requests.
   *
   * @param opContext
   *          The operation context to bind the listener to.
   *
   * @param allowConcurrentOOBIo
   *          True if reads are allowed with concurrent OOB writes.
*/
public static void bind(StorageCredentials storageCreds,
OperationContext opContext, boolean allowConcurrentOOBIo) {
SendRequestIntercept sendListener = new SendRequestIntercept(storageCreds,
allowConcurrentOOBIo, opContext);
opContext.getSendingRequestEventHandler().addListener(sendListener);
}
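  /**
   * Illustrative sketch (not part of the original class) of wiring this
   * interceptor into a fresh operation context before issuing reads that must
   * tolerate concurrent out-of-band writes. The credentials are assumed to be
   * the same ones the store already uses for signing.
   */
  private static OperationContext illustrateBind(StorageCredentials storageCreds) {
    OperationContext opContext = new OperationContext();
    SendRequestIntercept.bind(storageCreds, opContext, true);
    return opContext;
  }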
/**
* Handler which processes the sending request event from Azure SDK. The
   * handler simply resets the conditional header to make all read requests
* unconditional if reads with concurrent OOB writes are allowed.
*
* @param sendEvent
* - send event context from Windows Azure SDK.
*/
@Override
public void eventOccurred(SendingRequestEvent sendEvent) {
if (!(sendEvent.getConnectionObject() instanceof HttpURLConnection)) {
// Pass if there is no HTTP connection associated with this send
// request.
return;
}
// Capture the HTTP URL connection object and get size of the payload for
// the request.
HttpURLConnection urlConnection = (HttpURLConnection) sendEvent
.getConnectionObject();
    // Determine whether this is a download request by checking that the
    // request method is a "GET" operation.
if (urlConnection.getRequestMethod().equalsIgnoreCase("GET")
&& isOutOfBandIoAllowed()) {
// If concurrent reads on OOB writes are allowed, reset the if-match
// condition on the conditional header.
urlConnection.setRequestProperty(HeaderConstants.IF_MATCH,
ALLOW_ALL_REQUEST_PRECONDITIONS);
// In the Java AzureSDK the packet is signed before firing the
// SendRequest. Setting
// the conditional packet header property changes the contents of the
// packet, therefore the packet has to be re-signed.
try {
// Sign the request. GET's have no payload so the content length is
// zero.
StorageCredentialsHelper.signBlobAndQueueRequest(getCredentials(),
urlConnection, -1L, getOperationContext());
} catch (InvalidKeyException e) {
// Log invalid key exception to track signing error before the send
// fails.
String errString = String.format(
"Received invalid key exception when attempting sign packet."
+ " Cause: %s", e.getCause().toString());
LOG.error(errString);
} catch (StorageException e) {
// Log storage exception to track signing error before the call fails.
String errString = String.format(
"Received storage exception when attempting to sign packet."
+ " Cause: %s", e.getCause().toString());
LOG.error(errString);
}
}
}
}
| 6,211 | 35.757396 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlobMaterialization.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Indicates whether there are actual blobs indicating the existence of
* directories or whether we're inferring their existence from them having files
* in there.
*/
@InterfaceAudience.Private
enum BlobMaterialization {
/**
* Indicates a directory that isn't backed by an actual blob, but its
* existence is implied by the fact that there are files in there. For
* example, if the blob /a/b exists then it implies the existence of the /a
* directory if there's no /a blob indicating it.
*/
Implicit,
/**
* Indicates that the directory is backed by an actual blob that has the
* isFolder metadata on it.
*/
Explicit,
}
| 1,563 | 35.372093 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/KeyProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
/**
* The interface that every Azure file system key provider must implement.
*/
@InterfaceAudience.Private
public interface KeyProvider {
/**
* Key providers must implement this method. Given a list of configuration
* parameters for the specified Azure storage account, retrieve the plaintext
* storage account key.
*
* @param accountName
* the storage account name
* @param conf
* Hadoop configuration parameters
* @return the plaintext storage account key
* @throws KeyProviderException
*/
String getStorageAccountKey(String accountName, Configuration conf)
throws KeyProviderException;
}
| 1,606 | 35.522727 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper;
import com.microsoft.azure.storage.AccessCondition;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlob;
import java.util.concurrent.atomic.AtomicInteger;
/**
* An Azure blob lease that automatically renews itself indefinitely
* using a background thread. Use it to synchronize distributed processes,
* or to prevent writes to the blob by other processes that don't
* have the lease.
*
* Creating a new Lease object blocks the caller until the Azure blob lease is
* acquired.
*
* Attempting to get a lease on a non-existent blob throws StorageException.
*
* Call free() to release the Lease.
*
* You can use this Lease like a distributed lock. If the holder process
* dies, the lease will time out since it won't be renewed.
*/
public class SelfRenewingLease {
private CloudBlobWrapper blobWrapper;
private Thread renewer;
private volatile boolean leaseFreed;
private String leaseID = null;
private static final int LEASE_TIMEOUT = 60; // Lease timeout in seconds
// Time to wait to renew lease in milliseconds
public static final int LEASE_RENEWAL_PERIOD = 40000;
private static final Log LOG = LogFactory.getLog(SelfRenewingLease.class);
// Used to allocate thread serial numbers in thread name
private static AtomicInteger threadNumber = new AtomicInteger(0);
// Time to wait to retry getting the lease in milliseconds
private static final int LEASE_ACQUIRE_RETRY_INTERVAL = 2000;
public SelfRenewingLease(CloudBlobWrapper blobWrapper)
throws StorageException {
this.leaseFreed = false;
this.blobWrapper = blobWrapper;
// Keep trying to get the lease until you get it.
CloudBlob blob = blobWrapper.getBlob();
while(leaseID == null) {
try {
leaseID = blob.acquireLease(LEASE_TIMEOUT, null);
} catch (StorageException e) {
// Throw again if we don't want to keep waiting.
// We expect it to be that the lease is already present,
// or in some cases that the blob does not exist.
if (!e.getErrorCode().equals("LeaseAlreadyPresent")) {
LOG.info(
"Caught exception when trying to get lease on blob "
+ blobWrapper.getUri().toString() + ". " + e.getMessage());
throw e;
}
}
if (leaseID == null) {
try {
Thread.sleep(LEASE_ACQUIRE_RETRY_INTERVAL);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
}
}
}
renewer = new Thread(new Renewer());
// A Renewer running should not keep JVM from exiting, so make it a daemon.
renewer.setDaemon(true);
renewer.setName("AzureLeaseRenewer-" + threadNumber.getAndIncrement());
renewer.start();
LOG.debug("Acquired lease " + leaseID + " on " + blob.getUri()
+ " managed by thread " + renewer.getName());
}
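  /**
   * Illustrative sketch (not part of the original class) of using this lease
   * as a coarse distributed lock. The wrapper is assumed to reference an
   * existing blob; the lease is always freed, even if the guarded work fails.
   */
  static void illustrateLeaseUsage(CloudBlobWrapper blobWrapper)
      throws StorageException {
    SelfRenewingLease lease = new SelfRenewingLease(blobWrapper);
    try {
      // ... work that must not run concurrently with other lease holders ...
    } finally {
      lease.free();
    }
  }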
/**
* Free the lease and stop the keep-alive thread.
* @throws StorageException
*/
public void free() throws StorageException {
AccessCondition accessCondition = AccessCondition.generateEmptyCondition();
accessCondition.setLeaseID(leaseID);
try {
blobWrapper.getBlob().releaseLease(accessCondition);
} catch (StorageException e) {
if (e.getErrorCode().equals("BlobNotFound")) {
// Don't do anything -- it's okay to free a lease
// on a deleted file. The delete freed the lease
// implicitly.
} else {
// This error is not anticipated, so re-throw it.
LOG.warn("Unanticipated exception when trying to free lease " + leaseID
+ " on " + blobWrapper.getStorageUri());
throw(e);
}
} finally {
// Even if releasing the lease fails (e.g. because the file was deleted),
// make sure to record that we freed the lease, to terminate the
// keep-alive thread.
leaseFreed = true;
LOG.debug("Freed lease " + leaseID + " on " + blobWrapper.getUri()
+ " managed by thread " + renewer.getName());
}
}
public boolean isFreed() {
return leaseFreed;
}
public String getLeaseID() {
return leaseID;
}
public CloudBlob getCloudBlob() {
return blobWrapper.getBlob();
}
private class Renewer implements Runnable {
/**
* Start a keep-alive thread that will continue to renew
* the lease until it is freed or the process dies.
*/
@Override
public void run() {
LOG.debug("Starting lease keep-alive thread.");
AccessCondition accessCondition =
AccessCondition.generateEmptyCondition();
accessCondition.setLeaseID(leaseID);
while(!leaseFreed) {
try {
Thread.sleep(LEASE_RENEWAL_PERIOD);
} catch (InterruptedException e) {
LOG.debug("Keep-alive thread for lease " + leaseID +
" interrupted.");
// Restore the interrupted status
Thread.currentThread().interrupt();
}
try {
if (!leaseFreed) {
blobWrapper.getBlob().renewLease(accessCondition);
// It'll be very rare to renew the lease (most will be short)
// so log that we did it, to help with system debugging.
LOG.info("Renewed lease " + leaseID + " on "
+ getCloudBlob().getUri());
}
} catch (StorageException e) {
if (!leaseFreed) {
// Free the lease so we don't leave this thread running forever.
leaseFreed = true;
// Normally leases should be freed and there should be no
// exceptions, so log a warning.
LOG.warn("Attempt to renew lease " + leaseID + " on "
+ getCloudBlob().getUri()
+ " failed, but lease not yet freed. Reason: " +
e.getMessage());
}
}
}
}
}
}
| 6,919 | 32.921569 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/Wasb.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DelegateToFileSystem;
/**
* WASB implementation of AbstractFileSystem.
* This impl delegates to the old FileSystem
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class Wasb extends DelegateToFileSystem {
Wasb(final URI theUri, final Configuration conf) throws IOException,
URISyntaxException {
super(theUri, new NativeAzureFileSystem(), conf, "wasb", false);
}
@Override
public int getUriDefaultPort() {
return -1;
}
}
| 1,590 | 32.145833 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ShellDecryptionKeyProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell;
/**
* Shell decryption key provider which invokes an external script that will
* perform the key decryption.
*/
@InterfaceAudience.Private
public class ShellDecryptionKeyProvider extends SimpleKeyProvider {
static final String KEY_ACCOUNT_SHELLKEYPROVIDER_SCRIPT =
"fs.azure.shellkeyprovider.script";
@Override
public String getStorageAccountKey(String accountName, Configuration conf)
throws KeyProviderException {
String envelope = super.getStorageAccountKey(accountName, conf);
final String command = conf.get(KEY_ACCOUNT_SHELLKEYPROVIDER_SCRIPT);
if (command == null) {
throw new KeyProviderException(
"Script path is not specified via fs.azure.shellkeyprovider.script");
}
String[] cmd = command.split(" ");
String[] cmdWithEnvelope = Arrays.copyOf(cmd, cmd.length + 1);
cmdWithEnvelope[cmdWithEnvelope.length - 1] = envelope;
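    // For illustration (script path and envelope are hypothetical, not from
    // this code base): with fs.azure.shellkeyprovider.script set to
    // "/usr/local/bin/decryptkey.sh -q" and an encrypted envelope "AAAA...",
    // the command executed becomes
    // ["/usr/local/bin/decryptkey.sh", "-q", "AAAA..."], i.e. the envelope is
    // always appended as the final argument.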
String decryptedKey = null;
try {
decryptedKey = Shell.execCommand(cmdWithEnvelope);
} catch (IOException ex) {
throw new KeyProviderException(ex);
}
// trim any whitespace
return decryptedKey.trim();
}
}
| 2,186 | 33.714286 | 79 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureException.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * Thrown if there is a problem communicating with the Azure Storage service.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class AzureException extends IOException {
private static final long serialVersionUID = 1L;
public AzureException(String message) {
super(message);
}
public AzureException(String message, Throwable cause) {
super(message, cause);
}
public AzureException(Throwable t) {
super(t);
}
}
| 1,449 | 31.222222 | 75 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.apache.hadoop.fs.azure.PageBlobFormatHelpers.PAGE_DATA_SIZE;
import static org.apache.hadoop.fs.azure.PageBlobFormatHelpers.PAGE_HEADER_SIZE;
import static org.apache.hadoop.fs.azure.PageBlobFormatHelpers.PAGE_SIZE;
import static org.apache.hadoop.fs.azure.PageBlobFormatHelpers.fromShort;
import static org.apache.hadoop.fs.azure.PageBlobFormatHelpers.withMD5Checking;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.fs.Syncable;
import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import com.google.common.annotations.VisibleForTesting;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.BlobRequestOptions;
import com.microsoft.azure.storage.blob.CloudPageBlob;
/**
 * An output stream that writes file data to a page blob stored using ASV's
* custom format.
*/
final class PageBlobOutputStream extends OutputStream implements Syncable {
/**
* The maximum number of raw bytes Azure Storage allows us to upload in a
* single request (4 MB).
*/
private static final int MAX_RAW_BYTES_PER_REQUEST = 4 * 1024 * 1024;
/**
* The maximum number of pages Azure Storage allows us to upload in a
* single request.
*/
private static final int MAX_PAGES_IN_REQUEST =
MAX_RAW_BYTES_PER_REQUEST / PAGE_SIZE;
/**
* The maximum number of data bytes (header not included) we can upload
* in a single request. I'm limiting it to (N - 1) pages to account for
* the possibility that we may have to rewrite the previous request's
* last page.
*/
private static final int MAX_DATA_BYTES_PER_REQUEST =
PAGE_DATA_SIZE * (MAX_PAGES_IN_REQUEST - 1);
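  // Worked example of the sizes above: with 512-byte pages and a 4 MB request
  // cap, MAX_PAGES_IN_REQUEST = 4,194,304 / 512 = 8,192 pages, and
  // MAX_DATA_BYTES_PER_REQUEST = 510 * (8,192 - 1) = 4,177,410 data bytes.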
private final CloudPageBlobWrapper blob;
private final OperationContext opContext;
/**
* If the IO thread encounters an error, it'll store it here.
*/
private volatile IOException lastError;
/**
* Current size of the page blob in bytes. It may be extended if the file
* gets full.
*/
private long currentBlobSize;
/**
* The current byte offset we're at in the blob (how many bytes we've
* uploaded to the server).
*/
private long currentBlobOffset;
/**
* The data in the last page that we wrote to the server, in case we have to
* overwrite it in the new request.
*/
private byte[] previousLastPageDataWritten = new byte[0];
/**
* The current buffer we're writing to before sending to the server.
*/
private ByteArrayOutputStream outBuffer;
/**
* The task queue for writing to the server.
*/
private final LinkedBlockingQueue<Runnable> ioQueue;
/**
* The thread pool we're using for writing to the server. Note that the IO
* write is NOT designed for parallelism, so there can only be one thread
* in that pool (I'm using the thread pool mainly for the lifetime management
* capabilities, otherwise I'd have just used a simple Thread).
*/
private final ThreadPoolExecutor ioThreadPool;
// The last task given to the ioThreadPool to execute, to allow
// waiting until it's done.
private WriteRequest lastQueuedTask;
// Whether the stream has been closed.
private boolean closed = false;
public static final Log LOG = LogFactory.getLog(AzureNativeFileSystemStore.class);
// Set the minimum page blob file size to 128MB, which is >> the default
// block size of 32MB. This default block size is often used as the
// hbase.regionserver.hlog.blocksize.
// The goal is to have a safe minimum size for HBase log files to allow them
// to be filled and rolled without exceeding the minimum size. A larger size
// can be used by setting the fs.azure.page.blob.size configuration variable.
public static final long PAGE_BLOB_MIN_SIZE = 128L * 1024L * 1024L;
// The default and minimum amount to extend a page blob by if it starts
// to get full.
public static final long
PAGE_BLOB_DEFAULT_EXTENSION_SIZE = 128L * 1024L * 1024L;
// The configured page blob extension size (either the default, or if greater,
  // the value configured in fs.azure.page.blob.extension.size).
private long configuredPageBlobExtensionSize;
/**
* Constructs an output stream over the given page blob.
*
* @param blob the blob that this stream is associated with.
* @param opContext an object used to track the execution of the operation
* @throws StorageException if anything goes wrong creating the blob.
*/
public PageBlobOutputStream(final CloudPageBlobWrapper blob,
final OperationContext opContext,
final Configuration conf) throws StorageException {
this.blob = blob;
this.outBuffer = new ByteArrayOutputStream();
this.opContext = opContext;
this.lastQueuedTask = null;
this.ioQueue = new LinkedBlockingQueue<Runnable>();
// As explained above: the IO writes are not designed for parallelism,
// so we only have one thread in this thread pool.
this.ioThreadPool = new ThreadPoolExecutor(1, 1, 2, TimeUnit.SECONDS,
ioQueue);
// Make page blob files have a size that is the greater of a
// minimum size, or the value of fs.azure.page.blob.size from configuration.
long pageBlobConfigSize = conf.getLong("fs.azure.page.blob.size", 0);
LOG.debug("Read value of fs.azure.page.blob.size as " + pageBlobConfigSize
+ " from configuration (0 if not present).");
long pageBlobSize = Math.max(PAGE_BLOB_MIN_SIZE, pageBlobConfigSize);
// Ensure that the pageBlobSize is a multiple of page size.
if (pageBlobSize % PAGE_SIZE != 0) {
pageBlobSize += PAGE_SIZE - pageBlobSize % PAGE_SIZE;
}
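    // For example (hypothetical value): fs.azure.page.blob.size = 150,000,000
    // is above the 128 MB minimum but not a multiple of 512, so it is rounded
    // up to 150,000,128; values at or below PAGE_BLOB_MIN_SIZE simply yield
    // the 128 MB minimum, which is already page-aligned.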
blob.create(pageBlobSize, new BlobRequestOptions(), opContext);
currentBlobSize = pageBlobSize;
// Set the page blob extension size. It must be a minimum of the default
// value.
configuredPageBlobExtensionSize =
conf.getLong("fs.azure.page.blob.extension.size", 0);
if (configuredPageBlobExtensionSize < PAGE_BLOB_DEFAULT_EXTENSION_SIZE) {
configuredPageBlobExtensionSize = PAGE_BLOB_DEFAULT_EXTENSION_SIZE;
}
// make sure it is a multiple of the page size
if (configuredPageBlobExtensionSize % PAGE_SIZE != 0) {
configuredPageBlobExtensionSize +=
PAGE_SIZE - configuredPageBlobExtensionSize % PAGE_SIZE;
}
}
private void checkStreamState() throws IOException {
if (lastError != null) {
throw lastError;
}
}
/**
* Closes this output stream and releases any system resources associated with
* this stream. If any data remains in the buffer it is committed to the
* service.
*/
@Override
public synchronized void close() throws IOException {
if (closed) {
return;
}
LOG.debug("Closing page blob output stream.");
flush();
checkStreamState();
ioThreadPool.shutdown();
try {
LOG.debug(ioThreadPool.toString());
if (!ioThreadPool.awaitTermination(10, TimeUnit.MINUTES)) {
LOG.debug("Timed out after 10 minutes waiting for IO requests to finish");
logAllStackTraces();
LOG.debug(ioThreadPool.toString());
throw new IOException("Timed out waiting for IO requests to finish");
}
} catch (InterruptedException e) {
LOG.debug("Caught InterruptedException");
// Restore the interrupted status
Thread.currentThread().interrupt();
}
closed = true;
}
// Log the stacks of all threads.
private void logAllStackTraces() {
Map liveThreads = Thread.getAllStackTraces();
for (Iterator i = liveThreads.keySet().iterator(); i.hasNext(); ) {
Thread key = (Thread) i.next();
LOG.debug("Thread " + key.getName());
StackTraceElement[] trace = (StackTraceElement[]) liveThreads.get(key);
for (int j = 0; j < trace.length; j++) {
LOG.debug("\tat " + trace[j]);
}
}
}
/**
* A single write request for data to write to Azure storage.
*/
private class WriteRequest implements Runnable {
private final byte[] dataPayload;
private final CountDownLatch doneSignal = new CountDownLatch(1);
public WriteRequest(byte[] dataPayload) {
this.dataPayload = dataPayload;
}
public void waitTillDone() throws InterruptedException {
doneSignal.await();
}
@Override
public void run() {
try {
LOG.debug("before runInternal()");
runInternal();
LOG.debug("after runInternal()");
} finally {
doneSignal.countDown();
}
}
private void runInternal() {
if (lastError != null) {
// We're already in an error state, no point doing anything.
return;
}
if (dataPayload.length == 0) {
// Nothing to do.
return;
}
// Since we have to rewrite the last request's last page's data
// (may be empty), total data size is our data plus whatever was
// left from there.
final int totalDataBytes = dataPayload.length
+ previousLastPageDataWritten.length;
// Calculate the total number of pages we're writing to the server.
final int numberOfPages = (totalDataBytes / PAGE_DATA_SIZE)
+ (totalDataBytes % PAGE_DATA_SIZE == 0 ? 0 : 1);
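      // Worked example (hypothetical sizes): a 1,000-byte payload plus 200
      // carried-over bytes gives totalDataBytes = 1,200; with 510 data bytes
      // per page that is 2 full pages plus a remainder, so numberOfPages = 3
      // and rawPayload below is 3 * 512 = 1,536 bytes.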
// Fill up the raw bytes we're writing.
byte[] rawPayload = new byte[numberOfPages * PAGE_SIZE];
// Keep track of the size of the last page we uploaded.
int currentLastPageDataSize = -1;
for (int page = 0; page < numberOfPages; page++) {
// Our current byte offset in the data.
int dataOffset = page * PAGE_DATA_SIZE;
// Our current byte offset in the raw buffer.
int rawOffset = page * PAGE_SIZE;
// The size of the data in the current page.
final short currentPageDataSize = (short) Math.min(PAGE_DATA_SIZE,
totalDataBytes - dataOffset);
// Save off this page's size as the potential last page's size.
currentLastPageDataSize = currentPageDataSize;
// Write out the page size in the header.
final byte[] header = fromShort(currentPageDataSize);
System.arraycopy(header, 0, rawPayload, rawOffset, header.length);
rawOffset += header.length;
int bytesToCopyFromDataPayload = currentPageDataSize;
if (dataOffset < previousLastPageDataWritten.length) {
// First write out the last page's data.
final int bytesToCopyFromLastPage = Math.min(currentPageDataSize,
previousLastPageDataWritten.length - dataOffset);
System.arraycopy(previousLastPageDataWritten, dataOffset,
rawPayload, rawOffset, bytesToCopyFromLastPage);
bytesToCopyFromDataPayload -= bytesToCopyFromLastPage;
rawOffset += bytesToCopyFromLastPage;
dataOffset += bytesToCopyFromLastPage;
}
if (dataOffset >= previousLastPageDataWritten.length) {
// Then write the current payload's data.
System.arraycopy(dataPayload,
dataOffset - previousLastPageDataWritten.length,
rawPayload, rawOffset, bytesToCopyFromDataPayload);
}
}
// Raw payload constructed, ship it off to the server.
writePayloadToServer(rawPayload);
// Post-send bookkeeping.
currentBlobOffset += rawPayload.length;
if (currentLastPageDataSize < PAGE_DATA_SIZE) {
// Partial page, save it off so it's overwritten in the next request.
final int startOffset = (numberOfPages - 1) * PAGE_SIZE + PAGE_HEADER_SIZE;
previousLastPageDataWritten = Arrays.copyOfRange(rawPayload,
startOffset,
startOffset + currentLastPageDataSize);
// Since we're rewriting this page, set our current offset in the server
// to that page's beginning.
currentBlobOffset -= PAGE_SIZE;
} else {
// It wasn't a partial page, we won't need to rewrite it.
previousLastPageDataWritten = new byte[0];
}
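      // Continuing the worked example above: the third page would hold only
      // 180 data bytes (< 510), so those bytes are saved in
      // previousLastPageDataWritten and currentBlobOffset is backed up by one
      // page so that the next request rewrites the partial page in place.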
// Extend the file if we need more room in the file. This typically takes
// less than 200 milliseconds if it has to actually be done,
// so it is okay to include it in a write and won't cause a long pause.
// Other writes can be queued behind this write in any case.
conditionalExtendFile();
}
/**
* Writes the given raw payload to Azure Storage at the current blob
* offset.
*/
private void writePayloadToServer(byte[] rawPayload) {
final ByteArrayInputStream wrapperStream =
new ByteArrayInputStream(rawPayload);
LOG.debug("writing payload of " + rawPayload.length + " bytes to Azure page blob");
try {
long start = System.currentTimeMillis();
blob.uploadPages(wrapperStream, currentBlobOffset, rawPayload.length,
withMD5Checking(), PageBlobOutputStream.this.opContext);
long end = System.currentTimeMillis();
LOG.trace("Azure uploadPages time for " + rawPayload.length + " bytes = " + (end - start));
} catch (IOException ex) {
LOG.debug(ExceptionUtils.getStackTrace(ex));
lastError = ex;
} catch (StorageException ex) {
LOG.debug(ExceptionUtils.getStackTrace(ex));
lastError = new IOException(ex);
}
if (lastError != null) {
LOG.debug("Caught error in PageBlobOutputStream#writePayloadToServer()");
}
}
}
private synchronized void flushIOBuffers() {
if (outBuffer.size() == 0) {
return;
}
lastQueuedTask = new WriteRequest(outBuffer.toByteArray());
ioThreadPool.execute(lastQueuedTask);
outBuffer = new ByteArrayOutputStream();
}
/**
* Extend the page blob file if we are close to the end.
*/
private void conditionalExtendFile() {
// maximum allowed size of an Azure page blob (1 terabyte)
final long MAX_PAGE_BLOB_SIZE = 1024L * 1024L * 1024L * 1024L;
// If blob is already at the maximum size, then don't try to extend it.
if (currentBlobSize == MAX_PAGE_BLOB_SIZE) {
return;
}
// If we are within the maximum write size of the end of the file,
if (currentBlobSize - currentBlobOffset <= MAX_RAW_BYTES_PER_REQUEST) {
// Extend the file. Retry up to 3 times with back-off.
CloudPageBlob cloudPageBlob = (CloudPageBlob) blob.getBlob();
long newSize = currentBlobSize + configuredPageBlobExtensionSize;
// Make sure we don't exceed maximum blob size.
if (newSize > MAX_PAGE_BLOB_SIZE) {
newSize = MAX_PAGE_BLOB_SIZE;
}
final int MAX_RETRIES = 3;
int retries = 1;
boolean resizeDone = false;
while(!resizeDone && retries <= MAX_RETRIES) {
try {
cloudPageBlob.resize(newSize);
resizeDone = true;
currentBlobSize = newSize;
} catch (StorageException e) {
LOG.warn("Failed to extend size of " + cloudPageBlob.getUri());
try {
// sleep 2, 8, 18 seconds for up to 3 retries
Thread.sleep(2000 * retries * retries);
} catch (InterruptedException e1) {
// Restore the interrupted status
Thread.currentThread().interrupt();
}
} finally {
retries++;
}
}
}
}
/**
* Flushes this output stream and forces any buffered output bytes to be
* written out. If any data remains in the buffer it is committed to the
* service. Data is queued for writing but not forced out to the service
* before the call returns.
*/
@Override
public void flush() throws IOException {
checkStreamState();
flushIOBuffers();
}
/**
* Writes b.length bytes from the specified byte array to this output stream.
*
* @param data
* the byte array to write.
*
* @throws IOException
* if an I/O error occurs. In particular, an IOException may be
* thrown if the output stream has been closed.
*/
@Override
public void write(final byte[] data) throws IOException {
write(data, 0, data.length);
}
/**
* Writes length bytes from the specified byte array starting at offset to
* this output stream.
*
* @param data
* the byte array to write.
* @param offset
* the start offset in the data.
* @param length
* the number of bytes to write.
* @throws IOException
* if an I/O error occurs. In particular, an IOException may be
* thrown if the output stream has been closed.
*/
@Override
public void write(final byte[] data, final int offset, final int length)
throws IOException {
if (offset < 0 || length < 0 || length > data.length - offset) {
throw new IndexOutOfBoundsException();
}
writeInternal(data, offset, length);
}
/**
* Writes the specified byte to this output stream. The general contract for
* write is that one byte is written to the output stream. The byte to be
* written is the eight low-order bits of the argument b. The 24 high-order
* bits of b are ignored.
*
* @param byteVal
* the byteValue to write.
* @throws IOException
* if an I/O error occurs. In particular, an IOException may be
* thrown if the output stream has been closed.
*/
@Override
public void write(final int byteVal) throws IOException {
write(new byte[] { (byte) (byteVal & 0xFF) });
}
/**
* Writes the data to the buffer and triggers writes to the service as needed.
*
* @param data
* the byte array to write.
* @param offset
* the start offset in the data.
* @param length
* the number of bytes to write.
* @throws IOException
* if an I/O error occurs. In particular, an IOException may be
* thrown if the output stream has been closed.
*/
private synchronized void writeInternal(final byte[] data, int offset,
int length) throws IOException {
while (length > 0) {
checkStreamState();
final int availableBufferBytes = MAX_DATA_BYTES_PER_REQUEST
- this.outBuffer.size();
final int nextWrite = Math.min(availableBufferBytes, length);
outBuffer.write(data, offset, nextWrite);
offset += nextWrite;
length -= nextWrite;
if (outBuffer.size() > MAX_DATA_BYTES_PER_REQUEST) {
throw new RuntimeException("Internal error: maximum write size " +
            Integer.toString(MAX_DATA_BYTES_PER_REQUEST) + " exceeded.");
}
if (outBuffer.size() == MAX_DATA_BYTES_PER_REQUEST) {
flushIOBuffers();
}
}
}
/**
* Force all data in the output stream to be written to Azure storage.
* Wait to return until this is complete.
*/
@Override
public synchronized void hsync() throws IOException {
LOG.debug("Entering PageBlobOutputStream#hsync().");
long start = System.currentTimeMillis();
flush();
LOG.debug(ioThreadPool.toString());
try {
if (lastQueuedTask != null) {
lastQueuedTask.waitTillDone();
}
} catch (InterruptedException e1) {
// Restore the interrupted status
Thread.currentThread().interrupt();
}
LOG.debug("Leaving PageBlobOutputStream#hsync(). Total hsync duration = "
+ (System.currentTimeMillis() - start) + " msec.");
}
@Override
public void hflush() throws IOException {
// hflush is required to force data to storage, so call hsync,
// which does that.
hsync();
}
@Deprecated
public void sync() throws IOException {
// Sync has been deprecated in favor of hflush.
hflush();
}
// For unit testing purposes: kill the IO threads.
@VisibleForTesting
void killIoThreads() {
ioThreadPool.shutdownNow();
}
}
| 21,167 | 34.817259 | 99 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobFormatHelpers.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import java.nio.ByteBuffer;
import com.microsoft.azure.storage.blob.BlobRequestOptions;
/**
* Constants and helper methods for ASV's custom data format in page blobs.
*/
final class PageBlobFormatHelpers {
public static final short PAGE_SIZE = 512;
public static final short PAGE_HEADER_SIZE = 2;
public static final short PAGE_DATA_SIZE = PAGE_SIZE - PAGE_HEADER_SIZE;
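  // Each 512-byte page therefore stores a 2-byte big-endian length header
  // followed by up to 510 bytes of file data:
  //
  //   [ 2-byte data length | up to 510 data bytes ]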
// Hide constructor for utility class.
private PageBlobFormatHelpers() {
}
/**
* Stores the given short as a two-byte array.
*/
public static byte[] fromShort(short s) {
return ByteBuffer.allocate(2).putShort(s).array();
}
/**
* Retrieves a short from the given two bytes.
*/
public static short toShort(byte firstByte, byte secondByte) {
return ByteBuffer.wrap(new byte[] { firstByte, secondByte })
.getShort();
}
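  // Round-trip example: fromShort((short) 510) yields the big-endian bytes
  // { 0x01, 0xFE }, and toShort((byte) 0x01, (byte) 0xFE) returns 510 again.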
public static BlobRequestOptions withMD5Checking() {
BlobRequestOptions options = new BlobRequestOptions();
options.setUseTransactionalContentMD5(true);
return options;
}
}
| 1,886 | 30.983051 | 75 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.URI;
import java.util.Date;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
import org.apache.hadoop.fs.permission.PermissionStatus;
import com.google.common.annotations.VisibleForTesting;
/**
* <p>
* An abstraction for a key-based {@link File} store.
* </p>
*/
@InterfaceAudience.Private
interface NativeFileSystemStore {
void initialize(URI uri, Configuration conf, AzureFileSystemInstrumentation instrumentation) throws IOException;
void storeEmptyFolder(String key, PermissionStatus permissionStatus)
throws AzureException;
FileMetadata retrieveMetadata(String key) throws IOException;
DataInputStream retrieve(String key) throws IOException;
DataInputStream retrieve(String key, long byteRangeStart) throws IOException;
DataOutputStream storefile(String key, PermissionStatus permissionStatus)
throws AzureException;
boolean isPageBlobKey(String key);
boolean isAtomicRenameKey(String key);
void storeEmptyLinkFile(String key, String tempBlobKey,
PermissionStatus permissionStatus) throws AzureException;
String getLinkInFileMetadata(String key) throws AzureException;
PartialListing list(String prefix, final int maxListingCount,
final int maxListingDepth) throws IOException;
PartialListing list(String prefix, final int maxListingCount,
final int maxListingDepth, String priorLastKey) throws IOException;
PartialListing listAll(String prefix, final int maxListingCount,
final int maxListingDepth, String priorLastKey) throws IOException;
void changePermissionStatus(String key, PermissionStatus newPermission)
throws AzureException;
void delete(String key) throws IOException;
void rename(String srcKey, String dstKey) throws IOException;
void rename(String srcKey, String dstKey, boolean acquireLease, SelfRenewingLease existingLease)
throws IOException;
/**
* Delete all keys with the given prefix. Used for testing.
*
* @throws IOException
*/
@VisibleForTesting
void purge(String prefix) throws IOException;
/**
* Diagnostic method to dump state to the console.
*
* @throws IOException
*/
void dump() throws IOException;
void close();
void updateFolderLastModifiedTime(String key, SelfRenewingLease folderLease)
throws AzureException;
void updateFolderLastModifiedTime(String key, Date lastModified,
SelfRenewingLease folderLease) throws AzureException;
void delete(String key, SelfRenewingLease lease) throws IOException;
SelfRenewingLease acquireLease(String key) throws AzureException;
}
| 3,661 | 31.990991 | 114 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import static org.apache.hadoop.fs.azure.NativeAzureFileSystem.PATH_DELIMITER;
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.security.InvalidKeyException;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobContainerWrapper;
import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobDirectoryWrapper;
import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper;
import org.apache.hadoop.fs.azure.StorageInterface.CloudBlockBlobWrapper;
import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper;
import org.apache.hadoop.fs.azure.StorageInterfaceImpl.CloudPageBlobWrapperImpl;
import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
import org.apache.hadoop.fs.azure.metrics.BandwidthGaugeUpdater;
import org.apache.hadoop.fs.azure.metrics.ErrorMetricUpdater;
import org.apache.hadoop.fs.azure.metrics.ResponseReceivedMetricUpdater;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.mortbay.util.ajax.JSON;
import com.google.common.annotations.VisibleForTesting;
import com.microsoft.azure.storage.CloudStorageAccount;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.RetryExponentialRetry;
import com.microsoft.azure.storage.RetryNoRetry;
import com.microsoft.azure.storage.StorageCredentials;
import com.microsoft.azure.storage.StorageCredentialsAccountAndKey;
import com.microsoft.azure.storage.StorageCredentialsSharedAccessSignature;
import com.microsoft.azure.storage.StorageErrorCode;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.BlobListingDetails;
import com.microsoft.azure.storage.blob.BlobProperties;
import com.microsoft.azure.storage.blob.BlobRequestOptions;
import com.microsoft.azure.storage.blob.CloudBlob;
import com.microsoft.azure.storage.blob.CopyStatus;
import com.microsoft.azure.storage.blob.DeleteSnapshotsOption;
import com.microsoft.azure.storage.blob.ListBlobItem;
import com.microsoft.azure.storage.core.Utility;
/**
* Core implementation of Windows Azure Filesystem for Hadoop.
 * Provides the bridging logic between Hadoop's abstract filesystem and Azure Storage.
*
*/
@InterfaceAudience.Private
@VisibleForTesting
public class AzureNativeFileSystemStore implements NativeFileSystemStore {
/**
* Configuration knob on whether we do block-level MD5 validation on
* upload/download.
*/
static final String KEY_CHECK_BLOCK_MD5 = "fs.azure.check.block.md5";
/**
* Configuration knob on whether we store blob-level MD5 on upload.
*/
static final String KEY_STORE_BLOB_MD5 = "fs.azure.store.blob.md5";
static final String DEFAULT_STORAGE_EMULATOR_ACCOUNT_NAME = "storageemulator";
static final String STORAGE_EMULATOR_ACCOUNT_NAME_PROPERTY_NAME = "fs.azure.storage.emulator.account.name";
public static final Log LOG = LogFactory
.getLog(AzureNativeFileSystemStore.class);
private StorageInterface storageInteractionLayer;
private CloudBlobDirectoryWrapper rootDirectory;
private CloudBlobContainerWrapper container;
// Constants local to this class.
//
private static final String KEY_ACCOUNT_KEYPROVIDER_PREFIX = "fs.azure.account.keyprovider.";
private static final String KEY_ACCOUNT_SAS_PREFIX = "fs.azure.sas.";
// note: this value is not present in core-default.xml as our real default is
// computed as min(2*cpu,8)
private static final String KEY_CONCURRENT_CONNECTION_VALUE_OUT = "fs.azure.concurrentRequestCount.out";
private static final String KEY_STREAM_MIN_READ_SIZE = "fs.azure.read.request.size";
private static final String KEY_STORAGE_CONNECTION_TIMEOUT = "fs.azure.storage.timeout";
private static final String KEY_WRITE_BLOCK_SIZE = "fs.azure.write.request.size";
  // Property controlling whether to allow reads on blobs which are concurrently
// appended out-of-band.
private static final String KEY_READ_TOLERATE_CONCURRENT_APPEND = "fs.azure.io.read.tolerate.concurrent.append";
// Configurable throttling parameter properties. These properties are located
// in the core-site.xml configuration file.
private static final String KEY_MIN_BACKOFF_INTERVAL = "fs.azure.io.retry.min.backoff.interval";
private static final String KEY_MAX_BACKOFF_INTERVAL = "fs.azure.io.retry.max.backoff.interval";
private static final String KEY_BACKOFF_INTERVAL = "fs.azure.io.retry.backoff.interval";
private static final String KEY_MAX_IO_RETRIES = "fs.azure.io.retry.max.retries";
private static final String KEY_COPYBLOB_MIN_BACKOFF_INTERVAL =
"fs.azure.io.copyblob.retry.min.backoff.interval";
private static final String KEY_COPYBLOB_MAX_BACKOFF_INTERVAL =
"fs.azure.io.copyblob.retry.max.backoff.interval";
private static final String KEY_COPYBLOB_BACKOFF_INTERVAL =
"fs.azure.io.copyblob.retry.backoff.interval";
private static final String KEY_COPYBLOB_MAX_IO_RETRIES =
"fs.azure.io.copyblob.retry.max.retries";
private static final String KEY_SELF_THROTTLE_ENABLE = "fs.azure.selfthrottling.enable";
private static final String KEY_SELF_THROTTLE_READ_FACTOR = "fs.azure.selfthrottling.read.factor";
private static final String KEY_SELF_THROTTLE_WRITE_FACTOR = "fs.azure.selfthrottling.write.factor";
private static final String KEY_ENABLE_STORAGE_CLIENT_LOGGING = "fs.azure.storage.client.logging";
private static final String PERMISSION_METADATA_KEY = "hdi_permission";
private static final String OLD_PERMISSION_METADATA_KEY = "asv_permission";
private static final String IS_FOLDER_METADATA_KEY = "hdi_isfolder";
private static final String OLD_IS_FOLDER_METADATA_KEY = "asv_isfolder";
static final String VERSION_METADATA_KEY = "hdi_version";
static final String OLD_VERSION_METADATA_KEY = "asv_version";
static final String FIRST_WASB_VERSION = "2013-01-01";
static final String CURRENT_WASB_VERSION = "2013-09-01";
static final String LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY = "hdi_tmpupload";
static final String OLD_LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY = "asv_tmpupload";
/**
* Configuration key to indicate the set of directories in WASB where we
* should store files as page blobs instead of block blobs.
*
* Entries should be plain directory names (i.e. not URIs) with no leading or
* trailing slashes. Delimit the entries with commas.
*/
public static final String KEY_PAGE_BLOB_DIRECTORIES =
"fs.azure.page.blob.dir";
/**
* The set of directories where we should store files as page blobs.
*/
private Set<String> pageBlobDirs;
/**
* Configuration key to indicate the set of directories in WASB where
* we should do atomic folder rename synchronized with createNonRecursive.
*/
public static final String KEY_ATOMIC_RENAME_DIRECTORIES =
"fs.azure.atomic.rename.dir";
/**
* The set of directories where we should apply atomic folder rename
* synchronized with createNonRecursive.
*/
private Set<String> atomicRenameDirs;
private static final String HTTP_SCHEME = "http";
private static final String HTTPS_SCHEME = "https";
private static final String WASB_AUTHORITY_DELIMITER = "@";
private static final String AZURE_ROOT_CONTAINER = "$root";
private static final int DEFAULT_CONCURRENT_WRITES = 8;
  // Concurrent reads of data written out of band are disabled by default.
//
private static final boolean DEFAULT_READ_TOLERATE_CONCURRENT_APPEND = false;
// Default block sizes
public static final int DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * 1024 * 1024;
public static final int DEFAULT_UPLOAD_BLOCK_SIZE = 4 * 1024 * 1024;
// Retry parameter defaults.
//
private static final int DEFAULT_MIN_BACKOFF_INTERVAL = 1 * 1000; // 1s
private static final int DEFAULT_MAX_BACKOFF_INTERVAL = 30 * 1000; // 30s
private static final int DEFAULT_BACKOFF_INTERVAL = 1 * 1000; // 1s
private static final int DEFAULT_MAX_RETRY_ATTEMPTS = 15;
private static final int DEFAULT_COPYBLOB_MIN_BACKOFF_INTERVAL = 3 * 1000;
private static final int DEFAULT_COPYBLOB_MAX_BACKOFF_INTERVAL = 90 * 1000;
private static final int DEFAULT_COPYBLOB_BACKOFF_INTERVAL = 30 * 1000;
private static final int DEFAULT_COPYBLOB_MAX_RETRY_ATTEMPTS = 15;
// Self-throttling defaults. Allowed range = (0,1.0]
// Value of 1.0 means no self-throttling.
// Value of x means process data at factor x of unrestricted rate
private static final boolean DEFAULT_SELF_THROTTLE_ENABLE = true;
private static final float DEFAULT_SELF_THROTTLE_READ_FACTOR = 1.0f;
private static final float DEFAULT_SELF_THROTTLE_WRITE_FACTOR = 1.0f;
private static final int STORAGE_CONNECTION_TIMEOUT_DEFAULT = 90;
/**
* MEMBER VARIABLES
*/
private URI sessionUri;
private Configuration sessionConfiguration;
private int concurrentWrites = DEFAULT_CONCURRENT_WRITES;
private boolean isAnonymousCredentials = false;
// Set to true if we are connecting using shared access signatures.
private boolean connectingUsingSAS = false;
private AzureFileSystemInstrumentation instrumentation;
private BandwidthGaugeUpdater bandwidthGaugeUpdater;
private final static JSON PERMISSION_JSON_SERIALIZER = createPermissionJsonSerializer();
private boolean suppressRetryPolicy = false;
private boolean canCreateOrModifyContainer = false;
private ContainerState currentKnownContainerState = ContainerState.Unknown;
private final Object containerStateLock = new Object();
private boolean tolerateOobAppends = DEFAULT_READ_TOLERATE_CONCURRENT_APPEND;
private int downloadBlockSizeBytes = DEFAULT_DOWNLOAD_BLOCK_SIZE;
private int uploadBlockSizeBytes = DEFAULT_UPLOAD_BLOCK_SIZE;
// Bandwidth throttling exponential back-off parameters
//
private int minBackoff; // the minimum back-off interval (ms) between retries.
private int maxBackoff; // the maximum back-off interval (ms) between retries.
private int deltaBackoff; // the back-off interval (ms) between retries.
private int maxRetries; // the maximum number of retry attempts.
// Self-throttling parameters
private boolean selfThrottlingEnabled;
private float selfThrottlingReadFactor;
private float selfThrottlingWriteFactor;
private TestHookOperationContext testHookOperationContext = null;
  // Set if we're running against a storage emulator.
private boolean isStorageEmulator = false;
/**
* A test hook interface that can modify the operation context we use for
* Azure Storage operations, e.g. to inject errors.
*/
@VisibleForTesting
interface TestHookOperationContext {
OperationContext modifyOperationContext(OperationContext original);
}
/**
   * Suppress the default retry policy for the storage client, useful in unit tests to
* test negative cases without waiting forever.
*/
@VisibleForTesting
void suppressRetryPolicy() {
suppressRetryPolicy = true;
}
/**
* Add a test hook to modify the operation context we use for Azure Storage
* operations.
*
* @param testHook
* The test hook, or null to unset previous hooks.
*/
@VisibleForTesting
void addTestHookToOperationContext(TestHookOperationContext testHook) {
this.testHookOperationContext = testHook;
}
/**
* If we're asked by unit tests to not retry, set the retry policy factory in
* the client accordingly.
*/
private void suppressRetryPolicyInClientIfNeeded() {
if (suppressRetryPolicy) {
storageInteractionLayer.setRetryPolicyFactory(new RetryNoRetry());
}
}
/**
* Creates a JSON serializer that can serialize a PermissionStatus object into
* the JSON string we want in the blob metadata.
*
* @return The JSON serializer.
*/
private static JSON createPermissionJsonSerializer() {
JSON serializer = new JSON();
serializer.addConvertor(PermissionStatus.class,
new PermissionStatusJsonSerializer());
return serializer;
}
/**
* A converter for PermissionStatus to/from JSON as we want it in the blob
* metadata.
*/
private static class PermissionStatusJsonSerializer implements JSON.Convertor {
private static final String OWNER_TAG = "owner";
private static final String GROUP_TAG = "group";
private static final String PERMISSIONS_TAG = "permissions";
@Override
public void toJSON(Object obj, JSON.Output out) {
PermissionStatus permissionStatus = (PermissionStatus) obj;
// Don't store group as null, just store it as empty string
// (which is FileStatus behavior).
String group = permissionStatus.getGroupName() == null ? ""
: permissionStatus.getGroupName();
out.add(OWNER_TAG, permissionStatus.getUserName());
out.add(GROUP_TAG, group);
out.add(PERMISSIONS_TAG, permissionStatus.getPermission().toString());
}
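    // For example (hypothetical values), a PermissionStatus for user "alice",
    // group "supergroup" and permission rwxr-xr-x serializes to JSON along
    // the lines of:
    //   {"owner":"alice","group":"supergroup","permissions":"rwxr-xr-x"}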
@Override
public Object fromJSON(@SuppressWarnings("rawtypes") Map object) {
return PermissionStatusJsonSerializer.fromJSONMap(object);
}
@SuppressWarnings("rawtypes")
public static PermissionStatus fromJSONString(String jsonString) {
// The JSON class can only find out about an object's class (and call me)
// if we store the class name in the JSON string. Since I don't want to
      // do that (it's an implementation detail), I just deserialize as
// the default Map (JSON's default behavior) and parse that.
return fromJSONMap((Map) PERMISSION_JSON_SERIALIZER.fromJSON(jsonString));
}
private static PermissionStatus fromJSONMap(
@SuppressWarnings("rawtypes") Map object) {
return new PermissionStatus((String) object.get(OWNER_TAG),
(String) object.get(GROUP_TAG),
// The initial - below is the Unix file type,
// which FsPermission needs there but ignores.
FsPermission.valueOf("-" + (String) object.get(PERMISSIONS_TAG)));
}
}
@VisibleForTesting
void setAzureStorageInteractionLayer(StorageInterface storageInteractionLayer) {
this.storageInteractionLayer = storageInteractionLayer;
}
@VisibleForTesting
public BandwidthGaugeUpdater getBandwidthGaugeUpdater() {
return bandwidthGaugeUpdater;
}
/**
* Check if concurrent reads and writes on the same blob are allowed.
*
   * @return true if concurrent reads and OOB writes have been configured, false
* otherwise.
*/
private boolean isConcurrentOOBAppendAllowed() {
return tolerateOobAppends;
}
/**
   * Captures the URI and configuration object necessary to create a storage
   * session with Azure. It parses the scheme to ensure it matches
* the storage protocol supported by this file system.
*
* @param uri - URI for target storage blob.
* @param conf - reference to configuration object.
* @param instrumentation - the metrics source that will keep track of operations here.
*
* @throws IllegalArgumentException if URI or job object is null, or invalid scheme.
*/
@Override
public void initialize(URI uri, Configuration conf, AzureFileSystemInstrumentation instrumentation)
throws IllegalArgumentException, AzureException, IOException {
if (null == instrumentation) {
throw new IllegalArgumentException("Null instrumentation");
}
this.instrumentation = instrumentation;
if (null == this.storageInteractionLayer) {
this.storageInteractionLayer = new StorageInterfaceImpl();
}
// Check that URI exists.
//
if (null == uri) {
throw new IllegalArgumentException(
"Cannot initialize WASB file system, URI is null");
}
// Check that configuration object is non-null.
//
if (null == conf) {
throw new IllegalArgumentException(
"Cannot initialize WASB file system, conf is null");
}
if(!conf.getBoolean(
NativeAzureFileSystem.SKIP_AZURE_METRICS_PROPERTY_NAME, false)) {
      // If not skipping Azure metrics, create the bandwidthGaugeUpdater.
this.bandwidthGaugeUpdater = new BandwidthGaugeUpdater(instrumentation);
}
// Incoming parameters validated. Capture the URI and the job configuration
// object.
//
sessionUri = uri;
sessionConfiguration = conf;
// Start an Azure storage session.
//
createAzureStorageSession();
// Extract the directories that should contain page blobs
pageBlobDirs = getDirectorySet(KEY_PAGE_BLOB_DIRECTORIES);
LOG.debug("Page blob directories: " + setToString(pageBlobDirs));
// Extract directories that should have atomic rename applied.
atomicRenameDirs = getDirectorySet(KEY_ATOMIC_RENAME_DIRECTORIES);
String hbaseRoot;
try {
      // Add to this the hbase root directory, or /hbase if that is not set.
hbaseRoot = verifyAndConvertToStandardFormat(
sessionConfiguration.get("hbase.rootdir", "hbase"));
atomicRenameDirs.add(hbaseRoot);
} catch (URISyntaxException e) {
LOG.warn("Unable to initialize HBase root as an atomic rename directory.");
}
LOG.debug("Atomic rename directories: " + setToString(atomicRenameDirs));
}
/**
* Helper to format a string for log output from Set<String>
*/
private String setToString(Set<String> set) {
StringBuilder sb = new StringBuilder();
int i = 1;
for (String s : set) {
sb.append("/" + s);
if (i != set.size()) {
sb.append(", ");
}
i++;
}
return sb.toString();
}
/**
* Method to extract the account name from an Azure URI.
*
* @param uri
* -- WASB blob URI
   * @return accountName -- the account name for the URI.
* @throws URISyntaxException
* if the URI does not have an authority it is badly formed.
*/
private String getAccountFromAuthority(URI uri) throws URISyntaxException {
// Check to make sure that the authority is valid for the URI.
//
String authority = uri.getRawAuthority();
if (null == authority) {
// Badly formed or illegal URI.
//
throw new URISyntaxException(uri.toString(),
"Expected URI with a valid authority");
}
    // Check if the authority contains the delimiter separating the account name
    // from the container.
//
if (!authority.contains(WASB_AUTHORITY_DELIMITER)) {
return authority;
}
// Split off the container name and the authority.
//
String[] authorityParts = authority.split(WASB_AUTHORITY_DELIMITER, 2);
// Because the string contains an '@' delimiter, a container must be
// specified.
//
if (authorityParts.length < 2 || "".equals(authorityParts[0])) {
// Badly formed WASB authority since there is no container.
//
final String errMsg = String
.format(
"URI '%s' has a malformed WASB authority, expected container name. "
+ "Authority takes the form wasb://[<container name>@]<account name>",
uri.toString());
throw new IllegalArgumentException(errMsg);
}
// Return with the account name. It is possible that this name is NULL.
//
return authorityParts[1];
}
/**
* Method to extract the container name from an Azure URI.
*
* @param uri
* -- WASB blob URI
   * @return containerName -- the container name for the URI. May be null.
* @throws URISyntaxException
* if the uri does not have an authority it is badly formed.
*/
private String getContainerFromAuthority(URI uri) throws URISyntaxException {
// Check to make sure that the authority is valid for the URI.
//
String authority = uri.getRawAuthority();
if (null == authority) {
// Badly formed or illegal URI.
//
throw new URISyntaxException(uri.toString(),
"Expected URI with a valid authority");
}
// The URI has a valid authority. Extract the container name. It is the
    // first component of the WASB URI authority.
if (!authority.contains(WASB_AUTHORITY_DELIMITER)) {
// The authority does not have a container name. Use the default container by
// setting the container name to the default Azure root container.
//
return AZURE_ROOT_CONTAINER;
}
// Split off the container name and the authority.
String[] authorityParts = authority.split(WASB_AUTHORITY_DELIMITER, 2);
// Because the string contains an '@' delimiter, a container must be
// specified.
if (authorityParts.length < 2 || "".equals(authorityParts[0])) {
// Badly formed WASB authority since there is no container.
final String errMsg = String
.format(
"URI '%s' has a malformed WASB authority, expected container name."
+ "Authority takes the form wasb://[<container name>@]<account name>",
uri.toString());
throw new IllegalArgumentException(errMsg);
}
// Set the container name from the first entry for the split parts of the
// authority.
return authorityParts[0];
}
/**
   * Return the appropriate scheme for communicating with
* Azure depending on whether wasb or wasbs is specified in the target URI.
*
   * @return scheme - HTTPS or HTTP as appropriate.
*/
private String getHTTPScheme() {
String sessionScheme = sessionUri.getScheme();
// Check if we're on a secure URI scheme: wasbs or the legacy asvs scheme.
if (sessionScheme != null &&
(sessionScheme.equalsIgnoreCase("asvs") ||
sessionScheme.equalsIgnoreCase("wasbs"))) {
return HTTPS_SCHEME;
} else {
// At this point the scheme should be either null or asv or wasb.
// Intentionally I'm not going to validate it though since I don't feel
// it's this method's job to ensure a valid URI scheme for this file
// system.
return HTTP_SCHEME;
}
}
/**
* Set the configuration parameters for this client storage session with
* Azure.
*
* @throws AzureException
*/
private void configureAzureStorageSession() throws AzureException {
// Assertion: Target session URI already should have been captured.
if (sessionUri == null) {
throw new AssertionError(
"Expected a non-null session URI when configuring storage session");
}
// Assertion: A client session already should have been established with
// Azure.
if (storageInteractionLayer == null) {
throw new AssertionError(String.format(
"Cannot configure storage session for URI '%s' "
+ "if storage session has not been established.",
sessionUri.toString()));
}
// Determine whether or not reads are allowed concurrent with OOB writes.
tolerateOobAppends = sessionConfiguration.getBoolean(
KEY_READ_TOLERATE_CONCURRENT_APPEND,
DEFAULT_READ_TOLERATE_CONCURRENT_APPEND);
// Retrieve configuration for the minimum stream read and write block size.
//
this.downloadBlockSizeBytes = sessionConfiguration.getInt(
KEY_STREAM_MIN_READ_SIZE, DEFAULT_DOWNLOAD_BLOCK_SIZE);
this.uploadBlockSizeBytes = sessionConfiguration.getInt(
KEY_WRITE_BLOCK_SIZE, DEFAULT_UPLOAD_BLOCK_SIZE);
// The job may want to specify a timeout to use when engaging the
// storage service. The default is currently 90 seconds. It may
// be necessary to increase this value for long latencies in larger
// jobs. If the timeout specified is greater than zero seconds use
// it, otherwise use the default service client timeout.
int storageConnectionTimeout = sessionConfiguration.getInt(
KEY_STORAGE_CONNECTION_TIMEOUT, 0);
if (0 < storageConnectionTimeout) {
storageInteractionLayer.setTimeoutInMs(storageConnectionTimeout * 1000);
}
    // Set the concurrency values equal to that specified in the
// configuration file. If it does not exist, set it to the default
// value calculated as double the number of CPU cores on the client
    // machine. The concurrency value is the minimum of double the cores and
// the read/write property.
int cpuCores = 2 * Runtime.getRuntime().availableProcessors();
concurrentWrites = sessionConfiguration.getInt(
KEY_CONCURRENT_CONNECTION_VALUE_OUT,
Math.min(cpuCores, DEFAULT_CONCURRENT_WRITES));
// Set up the exponential retry policy.
//
minBackoff = sessionConfiguration.getInt(
KEY_MIN_BACKOFF_INTERVAL, DEFAULT_MIN_BACKOFF_INTERVAL);
maxBackoff = sessionConfiguration.getInt(
KEY_MAX_BACKOFF_INTERVAL, DEFAULT_MAX_BACKOFF_INTERVAL);
deltaBackoff = sessionConfiguration.getInt(
KEY_BACKOFF_INTERVAL, DEFAULT_BACKOFF_INTERVAL);
maxRetries = sessionConfiguration.getInt(
KEY_MAX_IO_RETRIES, DEFAULT_MAX_RETRY_ATTEMPTS);
storageInteractionLayer.setRetryPolicyFactory(
new RetryExponentialRetry(minBackoff, deltaBackoff, maxBackoff, maxRetries));
// read the self-throttling config.
selfThrottlingEnabled = sessionConfiguration.getBoolean(
KEY_SELF_THROTTLE_ENABLE, DEFAULT_SELF_THROTTLE_ENABLE);
selfThrottlingReadFactor = sessionConfiguration.getFloat(
KEY_SELF_THROTTLE_READ_FACTOR, DEFAULT_SELF_THROTTLE_READ_FACTOR);
selfThrottlingWriteFactor = sessionConfiguration.getFloat(
KEY_SELF_THROTTLE_WRITE_FACTOR, DEFAULT_SELF_THROTTLE_WRITE_FACTOR);
OperationContext.setLoggingEnabledByDefault(sessionConfiguration.
getBoolean(KEY_ENABLE_STORAGE_CLIENT_LOGGING, false));
if (LOG.isDebugEnabled()) {
LOG.debug(String
.format(
"AzureNativeFileSystemStore init. Settings=%d,%b,%d,{%d,%d,%d,%d},{%b,%f,%f}",
concurrentWrites, tolerateOobAppends,
((storageConnectionTimeout > 0) ? storageConnectionTimeout
: STORAGE_CONNECTION_TIMEOUT_DEFAULT), minBackoff,
deltaBackoff, maxBackoff, maxRetries, selfThrottlingEnabled,
selfThrottlingReadFactor, selfThrottlingWriteFactor));
}
}
/**
* Connect to Azure storage using anonymous credentials.
*
* @param uri
* - URI to target blob (R/O access to public blob)
*
* @throws StorageException
* raised on errors communicating with Azure storage.
* @throws IOException
* raised on errors performing I/O or setting up the session.
* @throws URISyntaxException
* raised on creating mal-formed URI's.
*/
private void connectUsingAnonymousCredentials(final URI uri)
throws StorageException, IOException, URISyntaxException {
// Use an HTTP scheme since the URI specifies a publicly accessible
// container. Explicitly create a storage URI corresponding to the URI
// parameter for use in creating the service client.
String accountName = getAccountFromAuthority(uri);
URI storageUri = new URI(getHTTPScheme() + ":" + PATH_DELIMITER
+ PATH_DELIMITER + accountName);
// Create the service client with anonymous credentials.
String containerName = getContainerFromAuthority(uri);
storageInteractionLayer.createBlobClient(storageUri);
suppressRetryPolicyInClientIfNeeded();
// Capture the container reference.
container = storageInteractionLayer.getContainerReference(containerName);
rootDirectory = container.getDirectoryReference("");
// Check for container existence, and our ability to access it.
try {
if (!container.exists(getInstrumentedContext())) {
throw new AzureException("Container " + containerName + " in account "
+ accountName + " not found, and we can't create "
+ " it using anoynomous credentials.");
}
} catch (StorageException ex) {
throw new AzureException("Unable to access container " + containerName
+ " in account " + accountName
+ " using anonymous credentials, and no credentials found for them "
+ " in the configuration.", ex);
}
// Accessing the storage server unauthenticated using
// anonymous credentials.
isAnonymousCredentials = true;
// Configure Azure storage session.
configureAzureStorageSession();
}
private void connectUsingCredentials(String accountName,
StorageCredentials credentials, String containerName)
throws URISyntaxException, StorageException, AzureException {
URI blobEndPoint;
if (isStorageEmulatorAccount(accountName)) {
isStorageEmulator = true;
CloudStorageAccount account =
CloudStorageAccount.getDevelopmentStorageAccount();
storageInteractionLayer.createBlobClient(account);
} else {
blobEndPoint = new URI(getHTTPScheme() + "://" +
accountName);
storageInteractionLayer.createBlobClient(blobEndPoint, credentials);
}
suppressRetryPolicyInClientIfNeeded();
// Capture the container reference for debugging purposes.
container = storageInteractionLayer.getContainerReference(containerName);
rootDirectory = container.getDirectoryReference("");
// Can only create container if using account key credentials
canCreateOrModifyContainer = credentials instanceof StorageCredentialsAccountAndKey;
// Configure Azure storage session.
configureAzureStorageSession();
}
/**
* Connect to Azure storage using account key credentials.
*/
private void connectUsingConnectionStringCredentials(
final String accountName, final String containerName,
final String accountKey) throws InvalidKeyException, StorageException,
IOException, URISyntaxException {
// If the account name is "acc.blob.core.windows.net", then the
// rawAccountName is just "acc"
String rawAccountName = accountName.split("\\.")[0];
StorageCredentials credentials = new StorageCredentialsAccountAndKey(
rawAccountName, accountKey);
connectUsingCredentials(accountName, credentials, containerName);
}
/**
* Connect to Azure storage using shared access signature credentials.
*/
private void connectUsingSASCredentials(final String accountName,
final String containerName, final String sas) throws InvalidKeyException,
StorageException, IOException, URISyntaxException {
StorageCredentials credentials = new StorageCredentialsSharedAccessSignature(
sas);
connectingUsingSAS = true;
connectUsingCredentials(accountName, credentials, containerName);
}
private boolean isStorageEmulatorAccount(final String accountName) {
return accountName.equalsIgnoreCase(sessionConfiguration.get(
STORAGE_EMULATOR_ACCOUNT_NAME_PROPERTY_NAME,
DEFAULT_STORAGE_EMULATOR_ACCOUNT_NAME));
}
@VisibleForTesting
public static String getAccountKeyFromConfiguration(String accountName,
Configuration conf) throws KeyProviderException {
String key = null;
String keyProviderClass = conf.get(KEY_ACCOUNT_KEYPROVIDER_PREFIX
+ accountName);
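    // For example (account name is hypothetical), setting
    //   fs.azure.account.keyprovider.myaccount.blob.core.windows.net =
    //       org.apache.hadoop.fs.azure.ShellDecryptionKeyProvider
    // makes the configured key be treated as an encrypted envelope that the
    // shell provider decrypts; with no provider configured, the key is used as-is.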
KeyProvider keyProvider = null;
if (keyProviderClass == null) {
// No key provider was provided so use the provided key as is.
keyProvider = new SimpleKeyProvider();
} else {
// create an instance of the key provider class and verify it
// implements KeyProvider
Object keyProviderObject = null;
try {
Class<?> clazz = conf.getClassByName(keyProviderClass);
keyProviderObject = clazz.newInstance();
} catch (Exception e) {
throw new KeyProviderException("Unable to load key provider class.", e);
}
if (!(keyProviderObject instanceof KeyProvider)) {
throw new KeyProviderException(keyProviderClass
+ " specified in config is not a valid KeyProvider class.");
}
keyProvider = (KeyProvider) keyProviderObject;
}
key = keyProvider.getStorageAccountKey(accountName, conf);
return key;
}
/**
* Establish a session with Azure blob storage based on the target URI. The
* method determines whether or not the URI target contains an explicit
* account or an implicit default cluster-wide account.
*
* @throws AzureException
* @throws IOException
*/
private void createAzureStorageSession ()
throws AzureException, IOException {
// Make sure this object was properly initialized with references to
// the sessionUri and sessionConfiguration.
if (null == sessionUri || null == sessionConfiguration) {
throw new AzureException("Filesystem object not initialized properly."
+ "Unable to start session with Azure Storage server.");
}
// File system object initialized, attempt to establish a session
// with the Azure storage service for the target URI string.
try {
// Inspect the URI authority to determine the account and use the account
// to start an Azure blob client session using an account key for the
      // account or anonymously.
// For all URI's do the following checks in order:
// 1. Validate that <account> can be used with the current Hadoop
// cluster by checking it exists in the list of configured accounts
// for the cluster.
// 2. Look up the AccountKey in the list of configured accounts for the
// cluster.
// 3. If there is no AccountKey, assume anonymous public blob access
// when accessing the blob.
//
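      // Illustrative example (hypothetical names): for a URI such as
      // wasb://[email protected]/dir/file, the account
      // authority is "myacct.blob.core.windows.net" and the container is
      // "mycontainer"; credentials are then resolved in the order: container
      // SAS property, account key, and finally anonymous access.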
// If the URI does not specify a container use the default root container
// under the account name.
// Assertion: Container name on the session Uri should be non-null.
if (getContainerFromAuthority(sessionUri) == null) {
throw new AssertionError(String.format(
"Non-null container expected from session URI: %s.",
sessionUri.toString()));
}
// Get the account name.
String accountName = getAccountFromAuthority(sessionUri);
if (null == accountName) {
// Account name is not specified as part of the URI. Throw indicating
// an invalid account name.
        final String errMsg = String.format(
            "Cannot load WASB file system: account name not"
                + " specified in URI: %s.", sessionUri.toString());
throw new AzureException(errMsg);
}
instrumentation.setAccountName(accountName);
String containerName = getContainerFromAuthority(sessionUri);
instrumentation.setContainerName(containerName);
// Check whether this is a storage emulator account.
if (isStorageEmulatorAccount(accountName)) {
// It is an emulator account, connect to it with no credentials.
connectUsingCredentials(accountName, null, containerName);
return;
}
// Check whether we have a shared access signature for that container.
String propertyValue = sessionConfiguration.get(KEY_ACCOUNT_SAS_PREFIX
+ containerName + "." + accountName);
if (propertyValue != null) {
// SAS was found. Connect using that.
connectUsingSASCredentials(accountName, containerName, propertyValue);
return;
}
// Check whether the account is configured with an account key.
propertyValue = getAccountKeyFromConfiguration(accountName,
sessionConfiguration);
if (propertyValue != null) {
// Account key was found.
// Create the Azure storage session using the account key and container.
connectUsingConnectionStringCredentials(
getAccountFromAuthority(sessionUri),
getContainerFromAuthority(sessionUri), propertyValue);
// Return to caller
return;
}
// The account access is not configured for this cluster. Try anonymous
// access.
connectUsingAnonymousCredentials(sessionUri);
} catch (Exception e) {
// Caught exception while attempting to initialize the Azure File
// System store, re-throw the exception.
throw new AzureException(e);
}
}
private enum ContainerState {
/**
* We haven't checked the container state yet.
*/
Unknown,
/**
* We checked and the container doesn't exist.
*/
DoesntExist,
/**
* The container exists and doesn't have an WASB version stamp on it.
*/
ExistsNoVersion,
/**
* The container exists and has an unsupported WASB version stamped on it.
*/
ExistsAtWrongVersion,
/**
* The container exists and has the proper WASB version stamped on it.
*/
ExistsAtRightVersion
}
private enum ContainerAccessType {
/**
* We're accessing the container for a pure read operation, e.g. read a
* file.
*/
PureRead,
/**
* We're accessing the container purely to write something, e.g. write a
* file.
*/
PureWrite,
/**
* We're accessing the container to read something then write, e.g. rename a
* file.
*/
ReadThenWrite
}
/**
* Trims a suffix/prefix from the given string. For example if
* s is given as "/xy" and toTrim is "/", this method returns "xy"
*/
private static String trim(String s, String toTrim) {
return StringUtils.removeEnd(StringUtils.removeStart(s, toTrim),
toTrim);
}
/**
* Checks if the given rawDir belongs to this account/container, and
* if so returns the canonicalized path for it. Otherwise return null.
*/
private String verifyAndConvertToStandardFormat(String rawDir) throws URISyntaxException {
URI asUri = new URI(rawDir);
if (asUri.getAuthority() == null
|| asUri.getAuthority().toLowerCase(Locale.ENGLISH).equalsIgnoreCase(
sessionUri.getAuthority().toLowerCase(Locale.ENGLISH))) {
// Applies to me.
return trim(asUri.getPath(), "/");
} else {
      // Doesn't apply to me.
return null;
}
}
/**
* Take a comma-separated list of directories from a configuration variable
* and transform it to a set of directories.
*/
private Set<String> getDirectorySet(final String configVar)
throws AzureException {
String[] rawDirs = sessionConfiguration.getStrings(configVar, new String[0]);
Set<String> directorySet = new HashSet<String>();
for (String currentDir : rawDirs) {
String myDir;
try {
myDir = verifyAndConvertToStandardFormat(currentDir);
} catch (URISyntaxException ex) {
        throw new AzureException(String.format(
            "The directory %s specified in the configuration entry %s is not"
                + " a valid URI.",
            currentDir, configVar), ex);
}
if (myDir != null) {
directorySet.add(myDir);
}
}
return directorySet;
}
/**
* Checks if the given key in Azure Storage should be stored as a page
* blob instead of block blob.
*/
public boolean isPageBlobKey(String key) {
return isKeyForDirectorySet(key, pageBlobDirs);
}
/**
   * Checks if the given key in Azure storage should have synchronized,
   * atomic folder rename (createNonRecursive) semantics applied.
*/
@Override
public boolean isAtomicRenameKey(String key) {
return isKeyForDirectorySet(key, atomicRenameDirs);
}
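  /**
   * Checks whether the given key falls under any directory in the given set.
   * An empty entry matches every key; entries without an authority are also
   * matched as paths relative to the default file system URI.
   */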
public boolean isKeyForDirectorySet(String key, Set<String> dirSet) {
String defaultFS = FileSystem.getDefaultUri(sessionConfiguration).toString();
for (String dir : dirSet) {
if (dir.isEmpty() ||
key.startsWith(dir + "/")) {
return true;
}
// Allow for blob directories with paths relative to the default file
// system.
//
try {
URI uriPageBlobDir = new URI (dir);
if (null == uriPageBlobDir.getAuthority()) {
// Concatenate the default file system prefix with the relative
// page blob directory path.
//
if (key.startsWith(trim(defaultFS, "/") + "/" + dir + "/")){
return true;
}
}
} catch (URISyntaxException e) {
LOG.info(String.format(
"URI syntax error creating URI for %s", dir));
}
}
return false;
}
/**
* This should be called from any method that does any modifications to the
* underlying container: it makes sure to put the WASB current version in the
* container's metadata if it's not already there.
*/
private ContainerState checkContainer(ContainerAccessType accessType)
throws StorageException, AzureException {
synchronized (containerStateLock) {
if (isOkContainerState(accessType)) {
return currentKnownContainerState;
}
if (currentKnownContainerState == ContainerState.ExistsAtWrongVersion) {
String containerVersion = retrieveVersionAttribute(container);
throw wrongVersionException(containerVersion);
}
// This means I didn't check it before or it didn't exist or
// we need to stamp the version. Since things may have changed by
// other machines since then, do the check again and don't depend
// on past information.
// Sanity check: we don't expect this at this point.
if (currentKnownContainerState == ContainerState.ExistsAtRightVersion) {
throw new AssertionError("Unexpected state: "
+ currentKnownContainerState);
}
// Download the attributes - doubles as an existence check with just
// one service call
try {
container.downloadAttributes(getInstrumentedContext());
currentKnownContainerState = ContainerState.Unknown;
} catch (StorageException ex) {
if (ex.getErrorCode().equals(
StorageErrorCode.RESOURCE_NOT_FOUND.toString())) {
currentKnownContainerState = ContainerState.DoesntExist;
} else {
throw ex;
}
}
if (currentKnownContainerState == ContainerState.DoesntExist) {
// If the container doesn't exist and we intend to write to it,
// create it now.
if (needToCreateContainer(accessType)) {
storeVersionAttribute(container);
container.create(getInstrumentedContext());
currentKnownContainerState = ContainerState.ExistsAtRightVersion;
}
} else {
// The container exists, check the version.
String containerVersion = retrieveVersionAttribute(container);
if (containerVersion != null) {
if (containerVersion.equals(FIRST_WASB_VERSION)) {
// It's the version from when WASB was called ASV, just
// fix the version attribute if needed and proceed.
// We should be good otherwise.
if (needToStampVersion(accessType)) {
storeVersionAttribute(container);
container.uploadMetadata(getInstrumentedContext());
}
} else if (!containerVersion.equals(CURRENT_WASB_VERSION)) {
// Don't know this version - throw.
currentKnownContainerState = ContainerState.ExistsAtWrongVersion;
throw wrongVersionException(containerVersion);
} else {
// It's our correct version.
currentKnownContainerState = ContainerState.ExistsAtRightVersion;
}
} else {
// No version info exists.
currentKnownContainerState = ContainerState.ExistsNoVersion;
if (needToStampVersion(accessType)) {
// Need to stamp the version
storeVersionAttribute(container);
container.uploadMetadata(getInstrumentedContext());
currentKnownContainerState = ContainerState.ExistsAtRightVersion;
}
}
}
return currentKnownContainerState;
}
}
private AzureException wrongVersionException(String containerVersion) {
return new AzureException("The container " + container.getName()
+ " is at an unsupported version: " + containerVersion
+ ". Current supported version: " + FIRST_WASB_VERSION);
}
private boolean needToStampVersion(ContainerAccessType accessType) {
// We need to stamp the version on the container any time we write to
// it and we have the correct credentials to be able to write container
// metadata.
return accessType != ContainerAccessType.PureRead
&& canCreateOrModifyContainer;
}
private static boolean needToCreateContainer(ContainerAccessType accessType) {
// We need to pro-actively create the container (if it doesn't exist) if
// we're doing a pure write. No need to create it for pure read or read-
// then-write access.
return accessType == ContainerAccessType.PureWrite;
}
// Determines whether we have to pull the container information again
// or we can work based off what we already have.
private boolean isOkContainerState(ContainerAccessType accessType) {
switch (currentKnownContainerState) {
case Unknown:
// When using SAS, we can't discover container attributes
// so just live with Unknown state and fail later if it
// doesn't exist.
return connectingUsingSAS;
case DoesntExist:
return false; // the container could have been created
case ExistsAtRightVersion:
return true; // fine to optimize
case ExistsAtWrongVersion:
return false;
case ExistsNoVersion:
// If there's no version, it's OK if we don't need to stamp the version
// or we can't anyway even if we wanted to.
return !needToStampVersion(accessType);
    default:
      throw new AssertionError("Unknown container state: "
          + currentKnownContainerState);
}
}
private boolean getUseTransactionalContentMD5() {
return sessionConfiguration.getBoolean(KEY_CHECK_BLOCK_MD5, true);
}
private BlobRequestOptions getUploadOptions() {
BlobRequestOptions options = new BlobRequestOptions();
options.setStoreBlobContentMD5(sessionConfiguration.getBoolean(
KEY_STORE_BLOB_MD5, false));
options.setUseTransactionalContentMD5(getUseTransactionalContentMD5());
options.setConcurrentRequestCount(concurrentWrites);
options.setRetryPolicyFactory(new RetryExponentialRetry(minBackoff,
deltaBackoff, maxBackoff, maxRetries));
return options;
}
private BlobRequestOptions getDownloadOptions() {
BlobRequestOptions options = new BlobRequestOptions();
options.setRetryPolicyFactory(
new RetryExponentialRetry(minBackoff, deltaBackoff, maxBackoff, maxRetries));
options.setUseTransactionalContentMD5(getUseTransactionalContentMD5());
return options;
}
@Override
public DataOutputStream storefile(String key, PermissionStatus permissionStatus)
throws AzureException {
try {
      // Verify that a session with the Azure storage server exists; fail if
      // it was never established.
if (null == storageInteractionLayer) {
final String errMsg = String.format(
"Storage session expected for URI '%s' but does not exist.",
sessionUri);
throw new AzureException(errMsg);
}
      // Check if there is an authenticated account associated with this
      // instance of the WASB file system. If not, the file system has not
      // been authenticated and all access is anonymous.
if (!isAuthenticatedAccess()) {
// Preemptively raise an exception indicating no uploads are
// allowed to anonymous accounts.
throw new AzureException(new IOException(
"Uploads to public accounts using anonymous "
+ "access is prohibited."));
}
checkContainer(ContainerAccessType.PureWrite);
/**
       * Note: Windows Azure Blob Storage does not allow the creation of
       * arbitrary directory paths under the default $root directory. This is
       * by design to eliminate ambiguity in specifying an implicit blob
       * address. A blob in the $root container cannot include a '/' in its
       * name, and callers must be careful not to include a trailing '/' when
       * referencing blobs in the $root container.
       * A '/' in the $root container would permit ambiguous blob names, as in
       * the following example involving two containers, $root and mycontainer:
       * http://myaccount.blob.core.windows.net/$root
       * http://myaccount.blob.core.windows.net/mycontainer
       * If the URL "mycontainer/myblob.txt" were allowed in $root, then the URL:
       * http://myaccount.blob.core.windows.net/mycontainer/myblob.txt
       * could mean either:
       * (1) container=mycontainer; blob=myblob.txt
       * (2) container=$root; blob=mycontainer/myblob.txt
       *
       * To avoid this type of ambiguity, Azure blob storage prevents arbitrary
       * paths under $root. For a simpler and more consistent user experience
       * it was decided to eliminate the opportunity for creating such paths by
       * making the $root container read-only under WASB.
*/
// Check that no attempt is made to write to blobs on default
// $root containers.
if (AZURE_ROOT_CONTAINER.equals(getContainerFromAuthority(sessionUri))) {
// Azure containers are restricted to non-root containers.
final String errMsg = String.format(
"Writes to '%s' container for URI '%s' are prohibited, "
+ "only updates on non-root containers permitted.",
AZURE_ROOT_CONTAINER, sessionUri.toString());
throw new AzureException(errMsg);
}
// Get the blob reference from the store's container and
// return it.
CloudBlobWrapper blob = getBlobReference(key);
storePermissionStatus(blob, permissionStatus);
// Create the output stream for the Azure blob.
//
OutputStream outputStream = openOutputStream(blob);
DataOutputStream dataOutStream = new SyncableDataOutputStream(outputStream);
return dataOutStream;
} catch (Exception e) {
// Caught exception while attempting to open the blob output stream.
// Re-throw as an Azure storage exception.
throw new AzureException(e);
}
}
/**
* Opens a new output stream to the given blob (page or block blob)
* to populate it from scratch with data.
*/
private OutputStream openOutputStream(final CloudBlobWrapper blob)
throws StorageException {
if (blob instanceof CloudPageBlobWrapperImpl){
return new PageBlobOutputStream(
(CloudPageBlobWrapper)blob, getInstrumentedContext(), sessionConfiguration);
} else {
      // Handle both CloudBlockBlobWrapperImpl and (only for the test code path)
// MockCloudBlockBlobWrapper.
return ((CloudBlockBlobWrapper) blob).openOutputStream(getUploadOptions(),
getInstrumentedContext());
}
}
/**
* Opens a new input stream for the given blob (page or block blob)
* to read its data.
*/
private InputStream openInputStream(CloudBlobWrapper blob)
throws StorageException, IOException {
if (blob instanceof CloudBlockBlobWrapper) {
return blob.openInputStream(getDownloadOptions(),
getInstrumentedContext(isConcurrentOOBAppendAllowed()));
} else {
return new PageBlobInputStream(
(CloudPageBlobWrapper) blob, getInstrumentedContext(
isConcurrentOOBAppendAllowed()));
}
}
/**
* Default permission to use when no permission metadata is found.
*
* @return The default permission to use.
*/
private static PermissionStatus defaultPermissionNoBlobMetadata() {
return new PermissionStatus("", "", FsPermission.getDefault());
}
private static void storeMetadataAttribute(CloudBlobWrapper blob,
String key, String value) {
HashMap<String, String> metadata = blob.getMetadata();
if (null == metadata) {
metadata = new HashMap<String, String>();
}
metadata.put(key, value);
blob.setMetadata(metadata);
}
private static String getMetadataAttribute(CloudBlobWrapper blob,
String... keyAlternatives) {
HashMap<String, String> metadata = blob.getMetadata();
if (null == metadata) {
return null;
}
for (String key : keyAlternatives) {
if (metadata.containsKey(key)) {
return metadata.get(key);
}
}
return null;
}
private static void removeMetadataAttribute(CloudBlobWrapper blob,
String key) {
HashMap<String, String> metadata = blob.getMetadata();
if (metadata != null) {
metadata.remove(key);
blob.setMetadata(metadata);
}
}
private static void storePermissionStatus(CloudBlobWrapper blob,
PermissionStatus permissionStatus) {
storeMetadataAttribute(blob, PERMISSION_METADATA_KEY,
PERMISSION_JSON_SERIALIZER.toJSON(permissionStatus));
// Remove the old metadata key if present
removeMetadataAttribute(blob, OLD_PERMISSION_METADATA_KEY);
}
private PermissionStatus getPermissionStatus(CloudBlobWrapper blob) {
String permissionMetadataValue = getMetadataAttribute(blob,
PERMISSION_METADATA_KEY, OLD_PERMISSION_METADATA_KEY);
if (permissionMetadataValue != null) {
return PermissionStatusJsonSerializer.fromJSONString(
permissionMetadataValue);
} else {
return defaultPermissionNoBlobMetadata();
}
}
private static void storeFolderAttribute(CloudBlobWrapper blob) {
storeMetadataAttribute(blob, IS_FOLDER_METADATA_KEY, "true");
// Remove the old metadata key if present
removeMetadataAttribute(blob, OLD_IS_FOLDER_METADATA_KEY);
}
private static void storeLinkAttribute(CloudBlobWrapper blob,
String linkTarget) throws UnsupportedEncodingException {
// We have to URL encode the link attribute as the link URI could
// have URI special characters which unless encoded will result
// in 403 errors from the server. This is due to metadata properties
// being sent in the HTTP header of the request which is in turn used
// on the server side to authorize the request.
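    // For example (hypothetical value), a link target of "dir/tmp file.txt"
    // would be stored as "dir%2Ftmp+file.txt".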
String encodedLinkTarget = null;
if (linkTarget != null) {
encodedLinkTarget = URLEncoder.encode(linkTarget, "UTF-8");
}
storeMetadataAttribute(blob,
LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY,
encodedLinkTarget);
// Remove the old metadata key if present
removeMetadataAttribute(blob,
OLD_LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY);
}
private static String getLinkAttributeValue(CloudBlobWrapper blob)
throws UnsupportedEncodingException {
String encodedLinkTarget = getMetadataAttribute(blob,
LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY,
OLD_LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY);
String linkTarget = null;
if (encodedLinkTarget != null) {
linkTarget = URLDecoder.decode(encodedLinkTarget, "UTF-8");
}
return linkTarget;
}
private static boolean retrieveFolderAttribute(CloudBlobWrapper blob) {
HashMap<String, String> metadata = blob.getMetadata();
return null != metadata
&& (metadata.containsKey(IS_FOLDER_METADATA_KEY) || metadata
.containsKey(OLD_IS_FOLDER_METADATA_KEY));
}
private static void storeVersionAttribute(CloudBlobContainerWrapper container) {
HashMap<String, String> metadata = container.getMetadata();
if (null == metadata) {
metadata = new HashMap<String, String>();
}
metadata.put(VERSION_METADATA_KEY, CURRENT_WASB_VERSION);
if (metadata.containsKey(OLD_VERSION_METADATA_KEY)) {
metadata.remove(OLD_VERSION_METADATA_KEY);
}
container.setMetadata(metadata);
}
private static String retrieveVersionAttribute(
CloudBlobContainerWrapper container) {
HashMap<String, String> metadata = container.getMetadata();
if (metadata == null) {
return null;
} else if (metadata.containsKey(VERSION_METADATA_KEY)) {
return metadata.get(VERSION_METADATA_KEY);
} else if (metadata.containsKey(OLD_VERSION_METADATA_KEY)) {
return metadata.get(OLD_VERSION_METADATA_KEY);
} else {
return null;
}
}
@Override
public void storeEmptyFolder(String key, PermissionStatus permissionStatus)
throws AzureException {
if (null == storageInteractionLayer) {
final String errMsg = String.format(
"Storage session expected for URI '%s' but does not exist.",
sessionUri);
throw new AssertionError(errMsg);
}
    // Check if there is an authenticated account associated with this
    // instance of the WASB file system. If not, the file system has not
    // been authenticated and all access is anonymous.
if (!isAuthenticatedAccess()) {
// Preemptively raise an exception indicating no uploads are
// allowed to anonymous accounts.
throw new AzureException(
"Uploads to to public accounts using anonymous access is prohibited.");
}
try {
checkContainer(ContainerAccessType.PureWrite);
CloudBlobWrapper blob = getBlobReference(key);
storePermissionStatus(blob, permissionStatus);
storeFolderAttribute(blob);
openOutputStream(blob).close();
} catch (Exception e) {
// Caught exception while attempting upload. Re-throw as an Azure
// storage exception.
throw new AzureException(e);
}
}
/**
   * Stores an empty blob that links to the temporary file where we're
   * uploading the initial data.
*/
@Override
public void storeEmptyLinkFile(String key, String tempBlobKey,
PermissionStatus permissionStatus) throws AzureException {
if (null == storageInteractionLayer) {
final String errMsg = String.format(
"Storage session expected for URI '%s' but does not exist.",
sessionUri);
throw new AssertionError(errMsg);
}
    // Check if there is an authenticated account associated with this
    // instance of the WASB file system. If not, the file system has not
    // been authenticated and all access is anonymous.
if (!isAuthenticatedAccess()) {
// Preemptively raise an exception indicating no uploads are
// allowed to anonymous accounts.
throw new AzureException(
"Uploads to to public accounts using anonymous access is prohibited.");
}
try {
checkContainer(ContainerAccessType.PureWrite);
CloudBlobWrapper blob = getBlobReference(key);
storePermissionStatus(blob, permissionStatus);
storeLinkAttribute(blob, tempBlobKey);
openOutputStream(blob).close();
} catch (Exception e) {
// Caught exception while attempting upload. Re-throw as an Azure
// storage exception.
throw new AzureException(e);
}
}
/**
* If the blob with the given key exists and has a link in its metadata to a
* temporary file (see storeEmptyLinkFile), this method returns the key to
* that temporary file. Otherwise, returns null.
*/
@Override
public String getLinkInFileMetadata(String key) throws AzureException {
if (null == storageInteractionLayer) {
final String errMsg = String.format(
"Storage session expected for URI '%s' but does not exist.",
sessionUri);
throw new AssertionError(errMsg);
}
try {
checkContainer(ContainerAccessType.PureRead);
CloudBlobWrapper blob = getBlobReference(key);
blob.downloadAttributes(getInstrumentedContext());
return getLinkAttributeValue(blob);
} catch (Exception e) {
// Caught exception while attempting download. Re-throw as an Azure
// storage exception.
throw new AzureException(e);
}
}
/**
* Private method to check for authenticated access.
*
   * @return true if access is credentialed and authenticated, false
   *         otherwise.
*/
private boolean isAuthenticatedAccess() throws AzureException {
if (isAnonymousCredentials) {
// Access to this storage account is unauthenticated.
return false;
}
// Access is authenticated.
return true;
}
/**
* This private method uses the root directory or the original container to
* list blobs under the directory or container depending on whether the
* original file system object was constructed with a short- or long-form URI.
* If the root directory is non-null the URI in the file constructor was in
* the long form.
*
* @param includeMetadata
* if set, the listed items will have their metadata populated
* already.
*
* @returns blobItems : iterable collection of blob items.
* @throws URISyntaxException
*
*/
private Iterable<ListBlobItem> listRootBlobs(boolean includeMetadata)
throws StorageException, URISyntaxException {
return rootDirectory.listBlobs(
null, false,
includeMetadata ?
EnumSet.of(BlobListingDetails.METADATA) :
EnumSet.noneOf(BlobListingDetails.class),
null,
getInstrumentedContext());
}
/**
* This private method uses the root directory or the original container to
* list blobs under the directory or container given a specified prefix for
* the directory depending on whether the original file system object was
* constructed with a short- or long-form URI. If the root directory is
* non-null the URI in the file constructor was in the long form.
*
* @param aPrefix
* : string name representing the prefix of containing blobs.
* @param includeMetadata
* if set, the listed items will have their metadata populated
* already.
*
* @returns blobItems : iterable collection of blob items.
* @throws URISyntaxException
*
*/
private Iterable<ListBlobItem> listRootBlobs(String aPrefix,
boolean includeMetadata) throws StorageException, URISyntaxException {
Iterable<ListBlobItem> list = rootDirectory.listBlobs(aPrefix,
false,
includeMetadata ?
EnumSet.of(BlobListingDetails.METADATA) :
EnumSet.noneOf(BlobListingDetails.class),
null,
getInstrumentedContext());
return list;
}
/**
* This private method uses the root directory or the original container to
* list blobs under the directory or container given a specified prefix for
* the directory depending on whether the original file system object was
* constructed with a short- or long-form URI. It also uses the specified flat
* or hierarchical option, listing details options, request options, and
* operation context.
*
* @param aPrefix
* string name representing the prefix of containing blobs.
* @param useFlatBlobListing
* - the list is flat if true, or hierarchical otherwise.
* @param listingDetails
   *          - determines whether snapshots, metadata, and
   *          committed/uncommitted data are included in the listing
* @param options
* - object specifying additional options for the request. null =
* default options
* @param opContext
* - context of the current operation
* @returns blobItems : iterable collection of blob items.
* @throws URISyntaxException
*
*/
private Iterable<ListBlobItem> listRootBlobs(String aPrefix, boolean useFlatBlobListing,
EnumSet<BlobListingDetails> listingDetails, BlobRequestOptions options,
OperationContext opContext) throws StorageException, URISyntaxException {
CloudBlobDirectoryWrapper directory = this.container.getDirectoryReference(aPrefix);
return directory.listBlobs(
null,
useFlatBlobListing,
listingDetails,
options,
opContext);
}
/**
* This private method uses the root directory or the original container to
* get the block blob reference depending on whether the original file system
* object was constructed with a short- or long-form URI. If the root
* directory is non-null the URI in the file constructor was in the long form.
*
* @param aKey
* : a key used to query Azure for the block blob.
* @returns blob : a reference to the Azure block blob corresponding to the
* key.
* @throws URISyntaxException
*
*/
private CloudBlobWrapper getBlobReference(String aKey)
throws StorageException, URISyntaxException {
CloudBlobWrapper blob = null;
if (isPageBlobKey(aKey)) {
blob = this.container.getPageBlobReference(aKey);
} else {
blob = this.container.getBlockBlobReference(aKey);
blob.setStreamMinimumReadSizeInBytes(downloadBlockSizeBytes);
blob.setWriteBlockSizeInBytes(uploadBlockSizeBytes);
}
return blob;
}
/**
* This private method normalizes the key by stripping the container name from
* the path and returns a path relative to the root directory of the
* container.
*
* @param keyUri
* - adjust this key to a path relative to the root directory
*
* @returns normKey
*/
private String normalizeKey(URI keyUri) {
String normKey;
// Strip the container name from the path and return the path
// relative to the root directory of the container.
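    // For example, a path of "/container/dir/file" split with a limit of 3
    // yields ["", "container", "dir/file"], so index (parts - 1) is the
    // container-relative key; emulator paths carry an extra account segment,
    // hence the larger split limit.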
int parts = isStorageEmulator ? 4 : 3;
normKey = keyUri.getPath().split("/", parts)[(parts - 1)];
// Return the fixed key.
return normKey;
}
/**
* This private method normalizes the key by stripping the container name from
* the path and returns a path relative to the root directory of the
* container.
*
* @param blob
* - adjust the key to this blob to a path relative to the root
* directory
*
* @returns normKey
*/
private String normalizeKey(CloudBlobWrapper blob) {
return normalizeKey(blob.getUri());
}
/**
* This private method normalizes the key by stripping the container name from
* the path and returns a path relative to the root directory of the
* container.
*
* @param directory
* - adjust the key to this directory to a path relative to the root
* directory
*
* @returns normKey
*/
private String normalizeKey(CloudBlobDirectoryWrapper directory) {
String dirKey = normalizeKey(directory.getUri());
// Strip the last delimiter
if (dirKey.endsWith(PATH_DELIMITER)) {
dirKey = dirKey.substring(0, dirKey.length() - 1);
}
return dirKey;
}
/**
   * Default method to create a new OperationContext for the Azure Storage
* operation that has listeners hooked to it that will update the metrics for
* this file system. This method does not bind to receive send request
* callbacks by default.
*
* @return The OperationContext object to use.
*/
private OperationContext getInstrumentedContext() {
// Default is to not bind to receive send callback events.
return getInstrumentedContext(false);
}
/**
* Creates a new OperationContext for the Azure Storage operation that has
* listeners hooked to it that will update the metrics for this file system.
*
* @param bindConcurrentOOBIo
* - bind to intercept send request call backs to handle OOB I/O.
*
* @return The OperationContext object to use.
*/
private OperationContext getInstrumentedContext(boolean bindConcurrentOOBIo) {
OperationContext operationContext = new OperationContext();
if (selfThrottlingEnabled) {
SelfThrottlingIntercept.hook(operationContext, selfThrottlingReadFactor,
selfThrottlingWriteFactor);
}
    if (bandwidthGaugeUpdater != null) {
      // bandwidthGaugeUpdater is null when we are configured to skip Azure
      // metrics.
ResponseReceivedMetricUpdater.hook(
operationContext,
instrumentation,
bandwidthGaugeUpdater);
}
// Bind operation context to receive send request callbacks on this operation.
// If reads concurrent to OOB writes are allowed, the interception will reset
// the conditional header on all Azure blob storage read requests.
if (bindConcurrentOOBIo) {
SendRequestIntercept.bind(storageInteractionLayer.getCredentials(),
operationContext, true);
}
if (testHookOperationContext != null) {
operationContext =
testHookOperationContext.modifyOperationContext(operationContext);
}
ErrorMetricUpdater.hook(operationContext, instrumentation);
// Return the operation context.
return operationContext;
}
@Override
public FileMetadata retrieveMetadata(String key) throws IOException {
    // Attempts to check status may occur before opening any streams, so first
    // verify that a session with the Azure storage server exists; fail if it
    // was never established.
if (null == storageInteractionLayer) {
final String errMsg = String.format(
"Storage session expected for URI '%s' but does not exist.",
sessionUri);
throw new AssertionError(errMsg);
}
if (LOG.isDebugEnabled()) {
LOG.debug("Retrieving metadata for " + key);
}
try {
if (checkContainer(ContainerAccessType.PureRead) == ContainerState.DoesntExist) {
// The container doesn't exist, so spare some service calls and just
// return null now.
return null;
}
// Handle the degenerate cases where the key does not exist or the
// key is a container.
if (key.equals("/")) {
// The key refers to root directory of container.
// Set the modification time for root to zero.
return new FileMetadata(key, 0, defaultPermissionNoBlobMetadata(),
BlobMaterialization.Implicit);
}
CloudBlobWrapper blob = getBlobReference(key);
// Download attributes and return file metadata only if the blob
// exists.
if (null != blob && blob.exists(getInstrumentedContext())) {
if (LOG.isDebugEnabled()) {
LOG.debug("Found " + key
+ " as an explicit blob. Checking if it's a file or folder.");
}
// The blob exists, so capture the metadata from the blob
// properties.
blob.downloadAttributes(getInstrumentedContext());
BlobProperties properties = blob.getProperties();
if (retrieveFolderAttribute(blob)) {
if (LOG.isDebugEnabled()) {
LOG.debug(key + " is a folder blob.");
}
return new FileMetadata(key, properties.getLastModified().getTime(),
getPermissionStatus(blob), BlobMaterialization.Explicit);
} else {
if (LOG.isDebugEnabled()) {
LOG.debug(key + " is a normal blob.");
}
return new FileMetadata(
key, // Always return denormalized key with metadata.
getDataLength(blob, properties),
properties.getLastModified().getTime(),
getPermissionStatus(blob));
}
}
// There is no file with that key name, but maybe it is a folder.
// Query the underlying folder/container to list the blobs stored
// there under that key.
//
Iterable<ListBlobItem> objects =
listRootBlobs(
key,
true,
EnumSet.of(BlobListingDetails.METADATA),
null,
getInstrumentedContext());
// Check if the directory/container has the blob items.
for (ListBlobItem blobItem : objects) {
if (blobItem instanceof CloudBlockBlobWrapper
|| blobItem instanceof CloudPageBlobWrapper) {
LOG.debug("Found blob as a directory-using this file under it to infer its properties "
+ blobItem.getUri());
blob = (CloudBlobWrapper) blobItem;
// The key specifies a directory. Create a FileMetadata object which
// specifies as such.
BlobProperties properties = blob.getProperties();
return new FileMetadata(key, properties.getLastModified().getTime(),
getPermissionStatus(blob), BlobMaterialization.Implicit);
}
}
// Return to caller with a null metadata object.
return null;
} catch (Exception e) {
// Re-throw the exception as an Azure storage exception.
throw new AzureException(e);
}
}
@Override
public DataInputStream retrieve(String key) throws AzureException, IOException {
try {
      // Verify that a session with the Azure storage server exists; fail if
      // it was never established.
if (null == storageInteractionLayer) {
final String errMsg = String.format(
"Storage session expected for URI '%s' but does not exist.",
sessionUri);
throw new AssertionError(errMsg);
}
checkContainer(ContainerAccessType.PureRead);
// Get blob reference and open the input buffer stream.
CloudBlobWrapper blob = getBlobReference(key);
BufferedInputStream inBufStream = new BufferedInputStream(
openInputStream(blob));
// Return a data input stream.
DataInputStream inDataStream = new DataInputStream(inBufStream);
return inDataStream;
} catch (Exception e) {
// Re-throw as an Azure storage exception.
throw new AzureException(e);
}
}
@Override
public DataInputStream retrieve(String key, long startByteOffset)
throws AzureException, IOException {
try {
      // Verify that a session with the Azure storage server exists; fail if
      // it was never established.
if (null == storageInteractionLayer) {
final String errMsg = String.format(
"Storage session expected for URI '%s' but does not exist.",
sessionUri);
throw new AssertionError(errMsg);
}
checkContainer(ContainerAccessType.PureRead);
// Get blob reference and open the input buffer stream.
CloudBlobWrapper blob = getBlobReference(key);
// Open input stream and seek to the start offset.
InputStream in = blob.openInputStream(
getDownloadOptions(), getInstrumentedContext(isConcurrentOOBAppendAllowed()));
// Create a data input stream.
DataInputStream inDataStream = new DataInputStream(in);
// Skip bytes and ignore return value. This is okay
// because if you try to skip too far you will be positioned
// at the end and reads will not return data.
inDataStream.skip(startByteOffset);
return inDataStream;
} catch (Exception e) {
// Re-throw as an Azure storage exception.
throw new AzureException(e);
}
}
@Override
public PartialListing list(String prefix, final int maxListingCount,
final int maxListingDepth) throws IOException {
return list(prefix, maxListingCount, maxListingDepth, null);
}
@Override
public PartialListing list(String prefix, final int maxListingCount,
final int maxListingDepth, String priorLastKey) throws IOException {
return list(prefix, PATH_DELIMITER, maxListingCount, maxListingDepth,
priorLastKey);
}
@Override
public PartialListing listAll(String prefix, final int maxListingCount,
final int maxListingDepth, String priorLastKey) throws IOException {
return list(prefix, null, maxListingCount, maxListingDepth, priorLastKey);
}
/**
* Searches the given list of {@link FileMetadata} objects for a directory
* with the given key.
*
* @param list
* The list to search.
* @param key
* The key to search for.
* @return The wanted directory, or null if not found.
*/
private static FileMetadata getDirectoryInList(
final Iterable<FileMetadata> list, String key) {
for (FileMetadata current : list) {
if (current.isDir() && current.getKey().equals(key)) {
return current;
}
}
return null;
}
private PartialListing list(String prefix, String delimiter,
final int maxListingCount, final int maxListingDepth, String priorLastKey)
throws IOException {
try {
checkContainer(ContainerAccessType.PureRead);
if (0 < prefix.length() && !prefix.endsWith(PATH_DELIMITER)) {
prefix += PATH_DELIMITER;
}
Iterable<ListBlobItem> objects;
if (prefix.equals("/")) {
objects = listRootBlobs(true);
} else {
objects = listRootBlobs(prefix, true);
}
ArrayList<FileMetadata> fileMetadata = new ArrayList<FileMetadata>();
for (ListBlobItem blobItem : objects) {
// Check that the maximum listing count is not exhausted.
//
if (0 < maxListingCount
&& fileMetadata.size() >= maxListingCount) {
break;
}
if (blobItem instanceof CloudBlockBlobWrapper || blobItem instanceof CloudPageBlobWrapper) {
String blobKey = null;
CloudBlobWrapper blob = (CloudBlobWrapper) blobItem;
BlobProperties properties = blob.getProperties();
// Determine format of the blob name depending on whether an absolute
// path is being used or not.
blobKey = normalizeKey(blob);
FileMetadata metadata;
if (retrieveFolderAttribute(blob)) {
metadata = new FileMetadata(blobKey,
properties.getLastModified().getTime(),
getPermissionStatus(blob),
BlobMaterialization.Explicit);
} else {
metadata = new FileMetadata(
blobKey,
getDataLength(blob, properties),
properties.getLastModified().getTime(),
getPermissionStatus(blob));
}
// Add the metadata to the list, but remove any existing duplicate
// entries first that we may have added by finding nested files.
FileMetadata existing = getDirectoryInList(fileMetadata, blobKey);
if (existing != null) {
fileMetadata.remove(existing);
}
fileMetadata.add(metadata);
} else if (blobItem instanceof CloudBlobDirectoryWrapper) {
CloudBlobDirectoryWrapper directory = (CloudBlobDirectoryWrapper) blobItem;
// Determine format of directory name depending on whether an absolute
// path is being used or not.
//
String dirKey = normalizeKey(directory);
// Strip the last /
if (dirKey.endsWith(PATH_DELIMITER)) {
dirKey = dirKey.substring(0, dirKey.length() - 1);
}
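          // (normalizeKey(directory) already strips a trailing delimiter, so
          // this check is defensive and normally a no-op.)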
// Reached the targeted listing depth. Return metadata for the
// directory using default permissions.
//
// Note: Something smarter should be done about permissions. Maybe
// inherit the permissions of the first non-directory blob.
// Also, getting a proper value for last-modified is tricky.
FileMetadata directoryMetadata = new FileMetadata(dirKey, 0,
defaultPermissionNoBlobMetadata(), BlobMaterialization.Implicit);
// Add the directory metadata to the list only if it's not already
// there.
if (getDirectoryInList(fileMetadata, dirKey) == null) {
fileMetadata.add(directoryMetadata);
}
// Currently at a depth of one, decrement the listing depth for
// sub-directories.
buildUpList(directory, fileMetadata, maxListingCount,
maxListingDepth - 1);
}
}
// Note: Original code indicated that this may be a hack.
priorLastKey = null;
PartialListing listing = new PartialListing(priorLastKey,
fileMetadata.toArray(new FileMetadata[] {}),
0 == fileMetadata.size() ? new String[] {}
: new String[] { prefix });
return listing;
} catch (Exception e) {
// Re-throw as an Azure storage exception.
//
throw new AzureException(e);
}
}
/**
* Build up a metadata list of blobs in an Azure blob directory. This method
   * uses an in-order, depth-first traversal of blob directory structures to maintain
* the sorted order of the blob names.
*
* @param aCloudBlobDirectory Azure blob directory
* @param aFileMetadataList a list of file metadata objects for each
* non-directory blob.
* @param maxListingCount maximum length of the built up list.
*/
private void buildUpList(CloudBlobDirectoryWrapper aCloudBlobDirectory,
ArrayList<FileMetadata> aFileMetadataList, final int maxListingCount,
final int maxListingDepth) throws Exception {
// Push the blob directory onto the stack.
//
AzureLinkedStack<Iterator<ListBlobItem>> dirIteratorStack =
new AzureLinkedStack<Iterator<ListBlobItem>>();
Iterable<ListBlobItem> blobItems = aCloudBlobDirectory.listBlobs(null,
false, EnumSet.of(BlobListingDetails.METADATA), null,
getInstrumentedContext());
Iterator<ListBlobItem> blobItemIterator = blobItems.iterator();
if (0 == maxListingDepth || 0 == maxListingCount) {
      // Recursion depth or listing count is already exhausted. Return
      // immediately.
return;
}
// The directory listing depth is unbounded if the maximum listing depth
// is negative.
final boolean isUnboundedDepth = (maxListingDepth < 0);
// Reset the current directory listing depth.
int listingDepth = 1;
// Loop until all directories have been traversed in-order. Loop only
    // while the following conditions are satisfied:
// (1) The stack is not empty, and
// (2) maxListingCount > 0 implies that the number of items in the
// metadata list is less than the max listing count.
while (null != blobItemIterator
&& (maxListingCount <= 0 || aFileMetadataList.size() < maxListingCount)) {
while (blobItemIterator.hasNext()) {
// Check if the count of items on the list exhausts the maximum
// listing count.
//
if (0 < maxListingCount && aFileMetadataList.size() >= maxListingCount) {
break;
}
ListBlobItem blobItem = blobItemIterator.next();
// Add the file metadata to the list if this is not a blob
// directory item.
//
if (blobItem instanceof CloudBlockBlobWrapper || blobItem instanceof CloudPageBlobWrapper) {
String blobKey = null;
CloudBlobWrapper blob = (CloudBlobWrapper) blobItem;
BlobProperties properties = blob.getProperties();
// Determine format of the blob name depending on whether an absolute
// path is being used or not.
blobKey = normalizeKey(blob);
FileMetadata metadata;
if (retrieveFolderAttribute(blob)) {
metadata = new FileMetadata(blobKey,
properties.getLastModified().getTime(),
getPermissionStatus(blob),
BlobMaterialization.Explicit);
} else {
metadata = new FileMetadata(
blobKey,
getDataLength(blob, properties),
properties.getLastModified().getTime(),
getPermissionStatus(blob));
}
// Add the directory metadata to the list only if it's not already
// there.
FileMetadata existing = getDirectoryInList(aFileMetadataList, blobKey);
if (existing != null) {
aFileMetadataList.remove(existing);
}
aFileMetadataList.add(metadata);
} else if (blobItem instanceof CloudBlobDirectoryWrapper) {
CloudBlobDirectoryWrapper directory = (CloudBlobDirectoryWrapper) blobItem;
// This is a directory blob, push the current iterator onto
// the stack of iterators and start iterating through the current
// directory.
if (isUnboundedDepth || maxListingDepth > listingDepth) {
// Push the current directory on the stack and increment the listing
// depth.
dirIteratorStack.push(blobItemIterator);
++listingDepth;
// The current blob item represents the new directory. Get
// an iterator for this directory and continue by iterating through
// this directory.
blobItems = directory.listBlobs(null, false,
EnumSet.noneOf(BlobListingDetails.class), null,
getInstrumentedContext());
blobItemIterator = blobItems.iterator();
} else {
// Determine format of directory name depending on whether an
// absolute path is being used or not.
String dirKey = normalizeKey(directory);
if (getDirectoryInList(aFileMetadataList, dirKey) == null) {
// Reached the targeted listing depth. Return metadata for the
// directory using default permissions.
//
// Note: Something smarter should be done about permissions. Maybe
// inherit the permissions of the first non-directory blob.
// Also, getting a proper value for last-modified is tricky.
//
FileMetadata directoryMetadata = new FileMetadata(dirKey,
0,
defaultPermissionNoBlobMetadata(),
BlobMaterialization.Implicit);
// Add the directory metadata to the list.
aFileMetadataList.add(directoryMetadata);
}
}
}
}
      // Traversal of the directory tree:
      // Check if the iterator stack is empty. If it is, set the next blob
      // iterator to null; this acts as a terminator for the outer loop.
      // Otherwise pop the next iterator from the stack and continue looping.
//
if (dirIteratorStack.isEmpty()) {
blobItemIterator = null;
} else {
// Pop the next directory item from the stack and decrement the
// depth.
blobItemIterator = dirIteratorStack.pop();
--listingDepth;
// Assertion: Listing depth should not be less than zero.
if (listingDepth < 0) {
throw new AssertionError("Non-negative listing depth expected");
}
}
}
}
/**
* Return the actual data length of the blob with the specified properties.
* If it is a page blob, you can't rely on the length from the properties
* argument and you must get it from the file. Otherwise, you can.
*/
private long getDataLength(CloudBlobWrapper blob, BlobProperties properties)
throws AzureException {
if (blob instanceof CloudPageBlobWrapper) {
try {
return PageBlobInputStream.getPageBlobDataSize((CloudPageBlobWrapper) blob,
getInstrumentedContext(
isConcurrentOOBAppendAllowed()));
} catch (Exception e) {
throw new AzureException(
"Unexpected exception getting page blob actual data size.", e);
}
}
return properties.getLength();
}
/**
* Deletes the given blob, taking special care that if we get a
* blob-not-found exception upon retrying the operation, we just
* swallow the error since what most probably happened is that
* the first operation succeeded on the server.
* @param blob The blob to delete.
* @param lease Azure blob lease, or null if no lease is to be used.
* @throws StorageException
*/
private void safeDelete(CloudBlobWrapper blob, SelfRenewingLease lease) throws StorageException {
OperationContext operationContext = getInstrumentedContext();
try {
blob.delete(operationContext, lease);
} catch (StorageException e) {
// On exception, check that if:
// 1. It's a BlobNotFound exception AND
// 2. It got there after one-or-more retries THEN
// we swallow the exception.
if (e.getErrorCode() != null &&
e.getErrorCode().equals("BlobNotFound") &&
operationContext.getRequestResults().size() > 1 &&
operationContext.getRequestResults().get(0).getException() != null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Swallowing delete exception on retry: " + e.getMessage());
}
return;
} else {
throw e;
}
} finally {
if (lease != null) {
lease.free();
}
}
}
@Override
public void delete(String key, SelfRenewingLease lease) throws IOException {
try {
if (checkContainer(ContainerAccessType.ReadThenWrite) == ContainerState.DoesntExist) {
// Container doesn't exist, no need to do anything
return;
}
// Get the blob reference and delete it.
CloudBlobWrapper blob = getBlobReference(key);
if (blob.exists(getInstrumentedContext())) {
safeDelete(blob, lease);
}
} catch (Exception e) {
// Re-throw as an Azure storage exception.
throw new AzureException(e);
}
}
@Override
public void delete(String key) throws IOException {
delete(key, null);
}
@Override
public void rename(String srcKey, String dstKey) throws IOException {
rename(srcKey, dstKey, false, null);
}
@Override
public void rename(String srcKey, String dstKey, boolean acquireLease,
SelfRenewingLease existingLease) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Moving " + srcKey + " to " + dstKey);
}
if (acquireLease && existingLease != null) {
throw new IOException("Cannot acquire new lease if one already exists.");
}
try {
      // Attempts to rename may occur before opening any streams, so first
      // verify that a session with the Azure storage server exists; fail if
      // it was never established.
if (null == storageInteractionLayer) {
final String errMsg = String.format(
"Storage session expected for URI '%s' but does not exist.",
sessionUri);
throw new AssertionError(errMsg);
}
checkContainer(ContainerAccessType.ReadThenWrite);
// Get the source blob and assert its existence. If the source key
// needs to be normalized then normalize it.
//
CloudBlobWrapper srcBlob = getBlobReference(srcKey);
if (!srcBlob.exists(getInstrumentedContext())) {
throw new AzureException ("Source blob " + srcKey +
" does not exist.");
}
/**
* Conditionally get a lease on the source blob to prevent other writers
* from changing it. This is used for correctness in HBase when log files
* are renamed. It generally should do no harm other than take a little
* more time for other rename scenarios. When the HBase master renames a
* log file folder, the lease locks out other writers. This
* prevents a region server that the master thinks is dead, but is still
* alive, from committing additional updates. This is different than
* when HBase runs on HDFS, where the region server recovers the lease
* on a log file, to gain exclusive access to it, before it splits it.
*/
SelfRenewingLease lease = null;
if (acquireLease) {
lease = srcBlob.acquireLease();
} else if (existingLease != null) {
lease = existingLease;
}
// Get the destination blob. The destination key always needs to be
// normalized.
//
CloudBlobWrapper dstBlob = getBlobReference(dstKey);
// Rename the source blob to the destination blob by copying it to
// the destination blob then deleting it.
//
      // The copy blob operation in Azure storage is very costly and is highly
      // likely to be throttled during Azure storage GC. The short-term fix is
      // to use a more aggressive exponential retry policy when the cluster is
      // getting throttled.
try {
dstBlob.startCopyFromBlob(srcBlob, null, getInstrumentedContext());
} catch (StorageException se) {
if (se.getErrorCode().equals(
StorageErrorCode.SERVER_BUSY.toString())) {
int copyBlobMinBackoff = sessionConfiguration.getInt(
KEY_COPYBLOB_MIN_BACKOFF_INTERVAL,
DEFAULT_COPYBLOB_MIN_BACKOFF_INTERVAL);
int copyBlobMaxBackoff = sessionConfiguration.getInt(
KEY_COPYBLOB_MAX_BACKOFF_INTERVAL,
DEFAULT_COPYBLOB_MAX_BACKOFF_INTERVAL);
int copyBlobDeltaBackoff = sessionConfiguration.getInt(
KEY_COPYBLOB_BACKOFF_INTERVAL,
DEFAULT_COPYBLOB_BACKOFF_INTERVAL);
int copyBlobMaxRetries = sessionConfiguration.getInt(
KEY_COPYBLOB_MAX_IO_RETRIES,
DEFAULT_COPYBLOB_MAX_RETRY_ATTEMPTS);
BlobRequestOptions options = new BlobRequestOptions();
options.setRetryPolicyFactory(new RetryExponentialRetry(
copyBlobMinBackoff, copyBlobDeltaBackoff, copyBlobMaxBackoff,
copyBlobMaxRetries));
dstBlob.startCopyFromBlob(srcBlob, options, getInstrumentedContext());
} else {
throw se;
}
}
waitForCopyToComplete(dstBlob, getInstrumentedContext());
safeDelete(srcBlob, lease);
} catch (StorageException e) {
// Re-throw exception as an Azure storage exception.
throw new AzureException(e);
} catch (URISyntaxException e) {
// Re-throw exception as an Azure storage exception.
throw new AzureException(e);
}
}
private void waitForCopyToComplete(CloudBlobWrapper blob, OperationContext opContext){
boolean copyInProgress = true;
while (copyInProgress) {
try {
blob.downloadAttributes(opContext);
}
catch (StorageException se){
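        // Intentionally ignored: if refreshing the blob attributes fails
        // here, the loop re-evaluates the last known copy state and retries
        // after sleeping.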
}
      // Test for null because the mocked file system doesn't know about copy
      // states yet.
copyInProgress = (blob.getCopyState() != null && blob.getCopyState().getStatus() == CopyStatus.PENDING);
if (copyInProgress) {
try {
Thread.sleep(1000);
}
catch (InterruptedException ie){
//ignore
}
}
}
}
/**
* Changes the permission status on the given key.
*/
@Override
public void changePermissionStatus(String key, PermissionStatus newPermission)
throws AzureException {
try {
checkContainer(ContainerAccessType.ReadThenWrite);
CloudBlobWrapper blob = getBlobReference(key);
blob.downloadAttributes(getInstrumentedContext());
storePermissionStatus(blob, newPermission);
blob.uploadMetadata(getInstrumentedContext());
} catch (Exception e) {
throw new AzureException(e);
}
}
@Override
public void purge(String prefix) throws IOException {
try {
      // Attempts to purge may occur before opening any streams, so first
      // verify that a session with the Azure storage server exists; fail if
      // it was never established.
if (null == storageInteractionLayer) {
final String errMsg = String.format(
"Storage session expected for URI '%s' but does not exist.",
sessionUri);
throw new AssertionError(errMsg);
}
if (checkContainer(ContainerAccessType.ReadThenWrite) == ContainerState.DoesntExist) {
// Container doesn't exist, no need to do anything.
return;
}
// Get all blob items with the given prefix from the container and delete
// them.
Iterable<ListBlobItem> objects = listRootBlobs(prefix, false);
for (ListBlobItem blobItem : objects) {
((CloudBlob) blobItem).delete(DeleteSnapshotsOption.NONE, null, null,
getInstrumentedContext());
}
} catch (Exception e) {
// Re-throw as an Azure storage exception.
//
throw new AzureException(e);
}
}
/**
* Get a lease on the blob identified by key. This lease will be renewed
* indefinitely by a background thread.
*/
@Override
public SelfRenewingLease acquireLease(String key) throws AzureException {
LOG.debug("acquiring lease on " + key);
try {
checkContainer(ContainerAccessType.ReadThenWrite);
CloudBlobWrapper blob = getBlobReference(key);
return blob.acquireLease();
}
catch (Exception e) {
// Caught exception while attempting to get lease. Re-throw as an
// Azure storage exception.
throw new AzureException(e);
}
}
@Override
public void updateFolderLastModifiedTime(String key, Date lastModified,
SelfRenewingLease folderLease)
throws AzureException {
try {
checkContainer(ContainerAccessType.ReadThenWrite);
CloudBlobWrapper blob = getBlobReference(key);
      // The setLastModified function is not available in version 2.0.0;
      // blob.uploadProperties automatically updates the last-modified
      // timestamp to the current time.
blob.uploadProperties(getInstrumentedContext(), folderLease);
} catch (Exception e) {
// Caught exception while attempting to update the properties. Re-throw as an
// Azure storage exception.
throw new AzureException(e);
}
}
@Override
public void updateFolderLastModifiedTime(String key,
SelfRenewingLease folderLease) throws AzureException {
final Calendar lastModifiedCalendar = Calendar
.getInstance(Utility.LOCALE_US);
lastModifiedCalendar.setTimeZone(Utility.UTC_ZONE);
Date lastModified = lastModifiedCalendar.getTime();
updateFolderLastModifiedTime(key, lastModified, folderLease);
}
@Override
public void dump() throws IOException {
}
@Override
public void close() {
if(bandwidthGaugeUpdater != null) {
bandwidthGaugeUpdater.close();
bandwidthGaugeUpdater = null;
}
}
// Finalizer to ensure complete shutdown
@Override
protected void finalize() throws Throwable {
LOG.debug("finalize() called");
close();
super.finalize();
}
}
| 99,360 | 36.866235 | 125 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure.metrics;
import java.net.HttpURLConnection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import com.microsoft.azure.storage.Constants.HeaderConstants;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.RequestResult;
import com.microsoft.azure.storage.ResponseReceivedEvent;
import com.microsoft.azure.storage.StorageEvent;
/**
* An event listener to the ResponseReceived event from Azure Storage that will
* update metrics appropriately when it gets that event.
*/
@InterfaceAudience.Private
public final class ResponseReceivedMetricUpdater extends StorageEvent<ResponseReceivedEvent> {
public static final Log LOG = LogFactory.getLog(ResponseReceivedMetricUpdater.class);
private final AzureFileSystemInstrumentation instrumentation;
private final BandwidthGaugeUpdater blockUploadGaugeUpdater;
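  /**
   * Creates a listener that updates the given metrics source and bandwidth
   * gauge updater. Instances are created only through {@link #hook}.
   */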
private ResponseReceivedMetricUpdater(OperationContext operationContext,
AzureFileSystemInstrumentation instrumentation,
BandwidthGaugeUpdater blockUploadGaugeUpdater) {
this.instrumentation = instrumentation;
this.blockUploadGaugeUpdater = blockUploadGaugeUpdater;
}
/**
* Hooks a new listener to the given operationContext that will update the
* metrics for the WASB file system appropriately in response to
* ResponseReceived events.
*
* @param operationContext The operationContext to hook.
* @param instrumentation The metrics source to update.
* @param blockUploadGaugeUpdater The blockUploadGaugeUpdater to use.
*/
public static void hook(
OperationContext operationContext,
AzureFileSystemInstrumentation instrumentation,
BandwidthGaugeUpdater blockUploadGaugeUpdater) {
ResponseReceivedMetricUpdater listener =
new ResponseReceivedMetricUpdater(operationContext,
instrumentation, blockUploadGaugeUpdater);
operationContext.getResponseReceivedEventHandler().addListener(listener);
}
/**
* Get the content length of the request in the given HTTP connection.
* @param connection The connection.
* @return The content length, or zero if not found.
*/
private long getRequestContentLength(HttpURLConnection connection) {
String lengthString = connection.getRequestProperty(
HeaderConstants.CONTENT_LENGTH);
    if (lengthString != null) {
      return Long.parseLong(lengthString);
    } else {
      return 0;
    }
}
/**
* Gets the content length of the response in the given HTTP connection.
* @param connection The connection.
* @return The content length.
*/
private long getResponseContentLength(HttpURLConnection connection) {
return connection.getContentLength();
}
/**
* Handle the response-received event from Azure SDK.
*/
@Override
public void eventOccurred(ResponseReceivedEvent eventArg) {
instrumentation.webResponse();
if (!(eventArg.getConnectionObject() instanceof HttpURLConnection)) {
// Typically this shouldn't happen, but just let it pass
return;
}
HttpURLConnection connection =
(HttpURLConnection) eventArg.getConnectionObject();
RequestResult currentResult = eventArg.getRequestResult();
if (currentResult == null) {
// Again, typically shouldn't happen, but let it pass
return;
}
long requestLatency = currentResult.getStopDate().getTime()
- currentResult.getStartDate().getTime();
if (currentResult.getStatusCode() == HttpURLConnection.HTTP_CREATED
&& connection.getRequestMethod().equalsIgnoreCase("PUT")) {
// If it's a PUT with an HTTP_CREATED status then it's a successful
// block upload.
long length = getRequestContentLength(connection);
if (length > 0) {
blockUploadGaugeUpdater.blockUploaded(
currentResult.getStartDate(),
currentResult.getStopDate(),
length);
instrumentation.rawBytesUploaded(length);
instrumentation.blockUploaded(requestLatency);
}
} else if (currentResult.getStatusCode() == HttpURLConnection.HTTP_PARTIAL
&& connection.getRequestMethod().equalsIgnoreCase("GET")) {
// If it's a GET with an HTTP_PARTIAL status then it's a successful
// block download.
long length = getResponseContentLength(connection);
if (length > 0) {
blockUploadGaugeUpdater.blockDownloaded(
currentResult.getStartDate(),
currentResult.getStopDate(),
length);
instrumentation.rawBytesDownloaded(length);
instrumentation.blockDownloaded(requestLatency);
}
}
}
}
| 5,545 | 36.727891 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/AzureFileSystemInstrumentation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure.metrics;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
/**
* A metrics source for the WASB file system to track all the metrics we care
* about for getting a clear picture of the performance/reliability/interaction
* of the Hadoop cluster with Azure Storage.
*/
@Metrics(about="Metrics for WASB", context="azureFileSystem")
@InterfaceAudience.Public
@InterfaceStability.Evolving
public final class AzureFileSystemInstrumentation implements MetricsSource {
public static final String METRIC_TAG_FILESYSTEM_ID = "wasbFileSystemId";
public static final String METRIC_TAG_ACCOUNT_NAME = "accountName";
public static final String METRIC_TAG_CONTAINTER_NAME = "containerName";
public static final String WASB_WEB_RESPONSES = "wasb_web_responses";
public static final String WASB_BYTES_WRITTEN =
"wasb_bytes_written_last_second";
public static final String WASB_BYTES_READ =
"wasb_bytes_read_last_second";
public static final String WASB_RAW_BYTES_UPLOADED =
"wasb_raw_bytes_uploaded";
public static final String WASB_RAW_BYTES_DOWNLOADED =
"wasb_raw_bytes_downloaded";
public static final String WASB_FILES_CREATED = "wasb_files_created";
public static final String WASB_FILES_DELETED = "wasb_files_deleted";
public static final String WASB_DIRECTORIES_CREATED = "wasb_directories_created";
public static final String WASB_DIRECTORIES_DELETED = "wasb_directories_deleted";
public static final String WASB_UPLOAD_RATE =
"wasb_maximum_upload_bytes_per_second";
public static final String WASB_DOWNLOAD_RATE =
"wasb_maximum_download_bytes_per_second";
public static final String WASB_UPLOAD_LATENCY =
"wasb_average_block_upload_latency_ms";
public static final String WASB_DOWNLOAD_LATENCY =
"wasb_average_block_download_latency_ms";
public static final String WASB_CLIENT_ERRORS = "wasb_client_errors";
public static final String WASB_SERVER_ERRORS = "wasb_server_errors";
/**
* Config key for how big the rolling window size for latency metrics should
* be (in seconds).
*/
private static final String KEY_ROLLING_WINDOW_SIZE = "fs.azure.metrics.rolling.window.size";
private final MetricsRegistry registry =
new MetricsRegistry("azureFileSystem")
.setContext("azureFileSystem");
private final MutableCounterLong numberOfWebResponses =
registry.newCounter(
WASB_WEB_RESPONSES,
"Total number of web responses obtained from Azure Storage",
0L);
private AtomicLong inMemoryNumberOfWebResponses = new AtomicLong(0);
private final MutableCounterLong numberOfFilesCreated =
registry.newCounter(
WASB_FILES_CREATED,
"Total number of files created through the WASB file system.",
0L);
private final MutableCounterLong numberOfFilesDeleted =
registry.newCounter(
WASB_FILES_DELETED,
"Total number of files deleted through the WASB file system.",
0L);
private final MutableCounterLong numberOfDirectoriesCreated =
registry.newCounter(
WASB_DIRECTORIES_CREATED,
"Total number of directories created through the WASB file system.",
0L);
private final MutableCounterLong numberOfDirectoriesDeleted =
registry.newCounter(
WASB_DIRECTORIES_DELETED,
"Total number of directories deleted through the WASB file system.",
0L);
private final MutableGaugeLong bytesWrittenInLastSecond =
registry.newGauge(
WASB_BYTES_WRITTEN,
"Total number of bytes written to Azure Storage during the last second.",
0L);
private final MutableGaugeLong bytesReadInLastSecond =
registry.newGauge(
WASB_BYTES_READ,
"Total number of bytes read from Azure Storage during the last second.",
0L);
private final MutableGaugeLong maximumUploadBytesPerSecond =
registry.newGauge(
WASB_UPLOAD_RATE,
"The maximum upload rate encountered to Azure Storage in bytes/second.",
0L);
private final MutableGaugeLong maximumDownloadBytesPerSecond =
registry.newGauge(
WASB_DOWNLOAD_RATE,
"The maximum download rate encountered to Azure Storage in bytes/second.",
0L);
private final MutableCounterLong rawBytesUploaded =
registry.newCounter(
WASB_RAW_BYTES_UPLOADED,
"Total number of raw bytes (including overhead) uploaded to Azure"
+ " Storage.",
0L);
private final MutableCounterLong rawBytesDownloaded =
registry.newCounter(
WASB_RAW_BYTES_DOWNLOADED,
"Total number of raw bytes (including overhead) downloaded from Azure"
+ " Storage.",
0L);
private final MutableCounterLong clientErrors =
registry.newCounter(
WASB_CLIENT_ERRORS,
"Total number of client-side errors by WASB (excluding 404).",
0L);
private final MutableCounterLong serverErrors =
registry.newCounter(
WASB_SERVER_ERRORS,
"Total number of server-caused errors by WASB.",
0L);
private final MutableGaugeLong averageBlockUploadLatencyMs;
private final MutableGaugeLong averageBlockDownloadLatencyMs;
private long currentMaximumUploadBytesPerSecond;
private long currentMaximumDownloadBytesPerSecond;
private static final int DEFAULT_LATENCY_ROLLING_AVERAGE_WINDOW =
5; // seconds
private final RollingWindowAverage currentBlockUploadLatency;
private final RollingWindowAverage currentBlockDownloadLatency;
private UUID fileSystemInstanceId;
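  /**
   * Creates a new instrumentation object, tagging it with a freshly
   * generated file system instance ID and sizing the latency rolling
   * windows from the given configuration.
   *
   * @param conf The configuration to read the rolling window size from.
   */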
public AzureFileSystemInstrumentation(Configuration conf) {
fileSystemInstanceId = UUID.randomUUID();
    registry.tag("wasbFileSystemId",
        "A unique identifier for the file system instance.",
        fileSystemInstanceId.toString());
final int rollingWindowSizeInSeconds =
conf.getInt(KEY_ROLLING_WINDOW_SIZE,
DEFAULT_LATENCY_ROLLING_AVERAGE_WINDOW);
averageBlockUploadLatencyMs =
registry.newGauge(
WASB_UPLOAD_LATENCY,
String.format("The average latency in milliseconds of uploading a single block"
+ ". The average latency is calculated over a %d-second rolling"
+ " window.", rollingWindowSizeInSeconds),
0L);
averageBlockDownloadLatencyMs =
registry.newGauge(
WASB_DOWNLOAD_LATENCY,
String.format("The average latency in milliseconds of downloading a single block"
+ ". The average latency is calculated over a %d-second rolling"
+ " window.", rollingWindowSizeInSeconds),
0L);
currentBlockUploadLatency =
new RollingWindowAverage(rollingWindowSizeInSeconds * 1000);
currentBlockDownloadLatency =
new RollingWindowAverage(rollingWindowSizeInSeconds * 1000);
}
/**
* The unique identifier for this file system in the metrics.
*/
public UUID getFileSystemInstanceId() {
return fileSystemInstanceId;
}
/**
* Get the metrics registry information.
*/
public MetricsInfo getMetricsRegistryInfo() {
return registry.info();
}
/**
* Sets the account name to tag all the metrics with.
* @param accountName The account name.
*/
public void setAccountName(String accountName) {
registry.tag("accountName",
"Name of the Azure Storage account that these metrics are going against",
accountName);
}
/**
* Sets the container name to tag all the metrics with.
* @param containerName The container name.
*/
public void setContainerName(String containerName) {
registry.tag("containerName",
"Name of the Azure Storage container that these metrics are going against",
containerName);
}
/**
* Indicate that we just got a web response from Azure Storage. This should
* be called for every web request/response we do (to get accurate metrics
* of how we're hitting the storage service).
*/
public void webResponse() {
numberOfWebResponses.incr();
inMemoryNumberOfWebResponses.incrementAndGet();
}
/**
* Gets the current number of web responses obtained from Azure Storage.
* @return The number of web responses.
*/
public long getCurrentWebResponses() {
return inMemoryNumberOfWebResponses.get();
}
/**
* Indicate that we just created a file through WASB.
*/
public void fileCreated() {
numberOfFilesCreated.incr();
}
/**
* Indicate that we just deleted a file through WASB.
*/
public void fileDeleted() {
numberOfFilesDeleted.incr();
}
/**
* Indicate that we just created a directory through WASB.
*/
public void directoryCreated() {
numberOfDirectoriesCreated.incr();
}
/**
* Indicate that we just deleted a directory through WASB.
*/
public void directoryDeleted() {
numberOfDirectoriesDeleted.incr();
}
/**
* Sets the current gauge value for how many bytes were written in the last
* second.
* @param currentBytesWritten The number of bytes.
*/
public void updateBytesWrittenInLastSecond(long currentBytesWritten) {
bytesWrittenInLastSecond.set(currentBytesWritten);
}
/**
* Sets the current gauge value for how many bytes were read in the last
* second.
* @param currentBytesRead The number of bytes.
*/
public void updateBytesReadInLastSecond(long currentBytesRead) {
bytesReadInLastSecond.set(currentBytesRead);
}
/**
* Record the current bytes-per-second upload rate seen.
* @param bytesPerSecond The bytes per second.
*/
public synchronized void currentUploadBytesPerSecond(long bytesPerSecond) {
if (bytesPerSecond > currentMaximumUploadBytesPerSecond) {
currentMaximumUploadBytesPerSecond = bytesPerSecond;
maximumUploadBytesPerSecond.set(bytesPerSecond);
}
}
/**
* Record the current bytes-per-second download rate seen.
* @param bytesPerSecond The bytes per second.
*/
public synchronized void currentDownloadBytesPerSecond(long bytesPerSecond) {
if (bytesPerSecond > currentMaximumDownloadBytesPerSecond) {
currentMaximumDownloadBytesPerSecond = bytesPerSecond;
maximumDownloadBytesPerSecond.set(bytesPerSecond);
}
}
/**
* Indicate that we just uploaded some data to Azure storage.
* @param numberOfBytes The raw number of bytes uploaded (including overhead).
*/
public void rawBytesUploaded(long numberOfBytes) {
rawBytesUploaded.incr(numberOfBytes);
}
/**
* Indicate that we just downloaded some data to Azure storage.
* @param numberOfBytes The raw number of bytes downloaded (including overhead).
*/
public void rawBytesDownloaded(long numberOfBytes) {
rawBytesDownloaded.incr(numberOfBytes);
}
/**
* Indicate that we just uploaded a block and record its latency.
* @param latency The latency in milliseconds.
*/
public void blockUploaded(long latency) {
currentBlockUploadLatency.addPoint(latency);
}
/**
* Indicate that we just downloaded a block and record its latency.
* @param latency The latency in milliseconds.
*/
public void blockDownloaded(long latency) {
currentBlockDownloadLatency.addPoint(latency);
}
/**
* Indicate that we just encountered a client-side error.
*/
public void clientErrorEncountered() {
clientErrors.incr();
}
/**
* Indicate that we just encountered a server-caused error.
*/
public void serverErrorEncountered() {
serverErrors.incr();
}
/**
* Get the current rolling average of the upload latency.
* @return rolling average of upload latency in milliseconds.
*/
public long getBlockUploadLatency() {
return currentBlockUploadLatency.getCurrentAverage();
}
/**
* Get the current rolling average of the download latency.
* @return rolling average of download latency in milliseconds.
*/
public long getBlockDownloadLatency() {
return currentBlockDownloadLatency.getCurrentAverage();
}
/**
* Get the current maximum upload bandwidth.
* @return maximum upload bandwidth in bytes per second.
*/
public long getCurrentMaximumUploadBandwidth() {
return currentMaximumUploadBytesPerSecond;
}
/**
* Get the current maximum download bandwidth.
* @return maximum download bandwidth in bytes per second.
*/
public long getCurrentMaximumDownloadBandwidth() {
return currentMaximumDownloadBytesPerSecond;
}
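  /**
   * Refreshes the average-latency gauges from their rolling windows and
   * snapshots all metrics into the given collector.
   */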
@Override
public void getMetrics(MetricsCollector builder, boolean all) {
averageBlockDownloadLatencyMs.set(
currentBlockDownloadLatency.getCurrentAverage());
averageBlockUploadLatencyMs.set(
currentBlockUploadLatency.getCurrentAverage());
registry.snapshot(builder.addRecord(registry.info().name()), true);
}
}
| 14,215 | 34.989873 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/RollingWindowAverage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure.metrics;
import java.util.ArrayDeque;
import java.util.Date;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Helper class to calculate rolling-window averages.
* Used to calculate rolling-window metrics in AzureNativeFileSystem.
*/
@InterfaceAudience.Private
final class RollingWindowAverage {
private final ArrayDeque<DataPoint> currentPoints =
new ArrayDeque<DataPoint>();
private final long windowSizeMs;
/**
* Create a new rolling-window average for the given window size.
* @param windowSizeMs The size of the window in milliseconds.
*/
public RollingWindowAverage(long windowSizeMs) {
this.windowSizeMs = windowSizeMs;
}
/**
* Add a new data point that just happened.
* @param value The value of the data point.
*/
public synchronized void addPoint(long value) {
currentPoints.offer(new DataPoint(new Date(), value));
cleanupOldPoints();
}
/**
* Get the current average.
* @return The current average.
*/
public synchronized long getCurrentAverage() {
cleanupOldPoints();
if (currentPoints.isEmpty()) {
return 0;
}
long sum = 0;
for (DataPoint current : currentPoints) {
sum += current.getValue();
}
return sum / currentPoints.size();
}
/**
* Clean up points that don't count any more (are before our
* rolling window) from our current queue of points.
*/
private void cleanupOldPoints() {
Date cutoffTime = new Date(new Date().getTime() - windowSizeMs);
while (!currentPoints.isEmpty()
&& currentPoints.peekFirst().getEventTime().before(cutoffTime)) {
currentPoints.removeFirst();
}
}
/**
* A single data point.
*/
private static class DataPoint {
private final Date eventTime;
private final long value;
public DataPoint(Date eventTime, long value) {
this.eventTime = eventTime;
this.value = value;
}
public Date getEventTime() {
return eventTime;
}
public long getValue() {
return value;
}
}
}
| 2,899 | 26.884615 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ErrorMetricUpdater.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure.metrics;
import static java.net.HttpURLConnection.HTTP_NOT_FOUND; //404
import static java.net.HttpURLConnection.HTTP_BAD_REQUEST; //400
import static java.net.HttpURLConnection.HTTP_INTERNAL_ERROR; //500
import org.apache.hadoop.classification.InterfaceAudience;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.RequestResult;
import com.microsoft.azure.storage.ResponseReceivedEvent;
import com.microsoft.azure.storage.StorageEvent;
/**
* An event listener to the ResponseReceived event from Azure Storage that will
* update error metrics appropriately when it gets that event.
*/
@InterfaceAudience.Private
public final class ErrorMetricUpdater extends StorageEvent<ResponseReceivedEvent> {
private final AzureFileSystemInstrumentation instrumentation;
private final OperationContext operationContext;
private ErrorMetricUpdater(OperationContext operationContext,
AzureFileSystemInstrumentation instrumentation) {
this.instrumentation = instrumentation;
this.operationContext = operationContext;
}
/**
* Hooks a new listener to the given operationContext that will update the
* error metrics for the WASB file system appropriately in response to
* ResponseReceived events.
*
* @param operationContext The operationContext to hook.
* @param instrumentation The metrics source to update.
*/
public static void hook(
OperationContext operationContext,
AzureFileSystemInstrumentation instrumentation) {
ErrorMetricUpdater listener =
new ErrorMetricUpdater(operationContext,
instrumentation);
operationContext.getResponseReceivedEventHandler().addListener(listener);
}
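  /**
   * Examines the HTTP status code of the last request result and counts it
   * as a client error (4xx other than 404) or a server error (5xx).
   */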
@Override
public void eventOccurred(ResponseReceivedEvent eventArg) {
RequestResult currentResult = operationContext.getLastResult();
int statusCode = currentResult.getStatusCode();
// Check if it's a client-side error: a 4xx status
// We exclude 404 because it happens frequently during the normal
// course of operation (each call to exists() would generate that
// if it's not found).
if (statusCode >= HTTP_BAD_REQUEST && statusCode < HTTP_INTERNAL_ERROR
&& statusCode != HTTP_NOT_FOUND) {
instrumentation.clientErrorEncountered();
} else if (statusCode >= HTTP_INTERNAL_ERROR) {
// It's a server error: a 5xx status. Could be an Azure Storage
// bug or (more likely) throttling.
instrumentation.serverErrorEncountered();
}
}
}
| 3,357 | 39.457831 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/AzureFileSystemMetricsSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure.metrics;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
/**
 * Manages the shared metrics system used by all WASB file system instances
 * in this process, reference-counting file system starts and stops.
 */
@InterfaceAudience.Private
public final class AzureFileSystemMetricsSystem {
private static MetricsSystemImpl instance;
private static int numFileSystems;
//private ctor
private AzureFileSystemMetricsSystem(){
}
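  /**
   * Indicates that a WASB file system instance has started. The shared
   * metrics system is created when the first instance starts.
   */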
public static synchronized void fileSystemStarted() {
if (numFileSystems == 0) {
instance = new MetricsSystemImpl();
instance.init("azure-file-system");
}
numFileSystems++;
}
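  /**
   * Indicates that a WASB file system instance has been closed. When the
   * last instance closes, pending metrics are published and the shared
   * metrics system is shut down.
   */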
public static synchronized void fileSystemClosed() {
if (numFileSystems == 1) {
instance.publishMetricsNow();
instance.stop();
instance.shutdown();
instance = null;
}
numFileSystems--;
}
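  /**
   * Registers a metrics source with the shared metrics system. The caller
   * must supply a unique name for each source.
   *
   * @param name Unique name of the source.
   * @param desc Description of the source.
   * @param source The metrics source to register.
   */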
public static void registerSource(String name, String desc,
MetricsSource source) {
//caller has to use unique name to register source
instance.register(name, desc, source);
}
public static synchronized void unregisterSource(String name) {
if (instance != null) {
//publish metrics before unregister a metrics source
instance.publishMetricsNow();
instance.unregisterSource(name);
}
}
}
| 2,160 | 29.871429 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure.metrics;
import java.util.ArrayList;
import java.util.Date;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Internal implementation class to help calculate the current bytes
* uploaded/downloaded and the maximum bandwidth gauges.
*/
@InterfaceAudience.Private
public final class BandwidthGaugeUpdater {
public static final Log LOG = LogFactory
.getLog(BandwidthGaugeUpdater.class);
public static final String THREAD_NAME = "AzureNativeFilesystemStore-UploadBandwidthUpdater";
private static final int DEFAULT_WINDOW_SIZE_MS = 1000;
private static final int PROCESS_QUEUE_INITIAL_CAPACITY = 1000;
private int windowSizeMs;
private ArrayList<BlockTransferWindow> allBlocksWritten =
createNewToProcessQueue();
private ArrayList<BlockTransferWindow> allBlocksRead =
createNewToProcessQueue();
private final Object blocksWrittenLock = new Object();
private final Object blocksReadLock = new Object();
private final AzureFileSystemInstrumentation instrumentation;
private Thread uploadBandwidthUpdater;
private volatile boolean suppressAutoUpdate = false;
/**
* Create a new updater object with default values.
* @param instrumentation The metrics source to update.
*/
public BandwidthGaugeUpdater(AzureFileSystemInstrumentation instrumentation) {
this(instrumentation, DEFAULT_WINDOW_SIZE_MS, false);
}
/**
* Create a new updater object with some overrides (used in unit tests).
* @param instrumentation The metrics source to update.
* @param windowSizeMs The window size to use for calculating bandwidth
* (in milliseconds).
* @param manualUpdateTrigger If true, then this object won't create the
* auto-update threads, and will wait for manual
* calls to triggerUpdate to occur.
*/
public BandwidthGaugeUpdater(AzureFileSystemInstrumentation instrumentation,
int windowSizeMs, boolean manualUpdateTrigger) {
this.windowSizeMs = windowSizeMs;
this.instrumentation = instrumentation;
if (!manualUpdateTrigger) {
uploadBandwidthUpdater = new Thread(new UploadBandwidthUpdater(), THREAD_NAME);
uploadBandwidthUpdater.setDaemon(true);
uploadBandwidthUpdater.start();
}
}
/**
* Indicate that a block has been uploaded.
* @param startDate The exact time the upload started.
* @param endDate The exact time the upload ended.
* @param length The number of bytes uploaded in the block.
*/
public void blockUploaded(Date startDate, Date endDate, long length) {
synchronized (blocksWrittenLock) {
allBlocksWritten.add(new BlockTransferWindow(startDate, endDate, length));
}
}
/**
* Indicate that a block has been downloaded.
* @param startDate The exact time the download started.
* @param endDate The exact time the download ended.
* @param length The number of bytes downloaded in the block.
*/
public void blockDownloaded(Date startDate, Date endDate, long length) {
synchronized (blocksReadLock) {
allBlocksRead.add(new BlockTransferWindow(startDate, endDate, length));
}
}
/**
* Creates a new ArrayList to hold incoming block transfer notifications
* before they're processed.
* @return The newly created ArrayList.
*/
private static ArrayList<BlockTransferWindow> createNewToProcessQueue() {
return new ArrayList<BlockTransferWindow>(PROCESS_QUEUE_INITIAL_CAPACITY);
}
/**
* Update the metrics source gauge for how many bytes were transferred
* during the last time window.
* @param updateWrite If true, update the write (upload) counter.
* Otherwise update the read (download) counter.
* @param bytes The number of bytes transferred.
*/
private void updateBytesTransferred(boolean updateWrite, long bytes) {
if (updateWrite) {
instrumentation.updateBytesWrittenInLastSecond(bytes);
}
else {
instrumentation.updateBytesReadInLastSecond(bytes);
}
}
/**
* Update the metrics source gauge for what the current transfer rate
* is.
* @param updateWrite If true, update the write (upload) counter.
* Otherwise update the read (download) counter.
* @param bytesPerSecond The number of bytes per second we're seeing.
*/
private void updateBytesTransferRate(boolean updateWrite, long bytesPerSecond) {
if (updateWrite) {
instrumentation.currentUploadBytesPerSecond(bytesPerSecond);
}
else {
instrumentation.currentDownloadBytesPerSecond(bytesPerSecond);
}
}
/**
* For unit test purposes, suppresses auto-update of the metrics
* from the dedicated thread.
*/
public void suppressAutoUpdate() {
suppressAutoUpdate = true;
}
/**
* Resumes auto-update (undo suppressAutoUpdate).
*/
public void resumeAutoUpdate() {
suppressAutoUpdate = false;
}
/**
* Triggers the update of the metrics gauge based on all the blocks
* uploaded/downloaded so far. This is typically done periodically in a
* dedicated update thread, but exposing as public for unit test purposes.
*
* @param updateWrite If true, we'll update the write (upload) metrics.
* Otherwise we'll update the read (download) ones.
*/
public void triggerUpdate(boolean updateWrite) {
ArrayList<BlockTransferWindow> toProcess = null;
synchronized (updateWrite ? blocksWrittenLock : blocksReadLock) {
if (updateWrite && !allBlocksWritten.isEmpty()) {
toProcess = allBlocksWritten;
allBlocksWritten = createNewToProcessQueue();
} else if (!updateWrite && !allBlocksRead.isEmpty()) {
toProcess = allBlocksRead;
allBlocksRead = createNewToProcessQueue();
}
}
// Check to see if we have any blocks to process.
if (toProcess == null) {
// Nothing to process, set the current bytes and rate to zero.
updateBytesTransferred(updateWrite, 0);
updateBytesTransferRate(updateWrite, 0);
return;
}
// The cut-off time for when we want to calculate rates is one
// window size ago from now.
long cutoffTime = new Date().getTime() - windowSizeMs;
// Go through all the blocks we're processing, and calculate the
// total number of bytes processed as well as the maximum transfer
// rate we experienced for any single block during our time window.
long maxSingleBlockTransferRate = 0;
long bytesInLastSecond = 0;
for (BlockTransferWindow currentWindow : toProcess) {
long windowDuration = currentWindow.getEndDate().getTime()
- currentWindow.getStartDate().getTime();
if (windowDuration == 0) {
        // Edge case: the transfer was faster than our clock resolution,
        // so assume it took 1 ms.
windowDuration = 1;
}
if (currentWindow.getStartDate().getTime() > cutoffTime) {
// This block was transferred fully within our time window,
// just add its bytes to the total.
bytesInLastSecond += currentWindow.bytesTransferred;
} else if (currentWindow.getEndDate().getTime() > cutoffTime) {
// This block started its transfer before our time window,
// interpolate to estimate how many bytes from that block
// were actually transferred during our time window.
long adjustedBytes = (currentWindow.getBytesTransferred()
* (currentWindow.getEndDate().getTime() - cutoffTime))
/ windowDuration;
bytesInLastSecond += adjustedBytes;
}
// Calculate the transfer rate for this block.
long currentBlockTransferRate =
(currentWindow.getBytesTransferred() * 1000) / windowDuration;
maxSingleBlockTransferRate =
Math.max(maxSingleBlockTransferRate, currentBlockTransferRate);
}
updateBytesTransferred(updateWrite, bytesInLastSecond);
// The transfer rate we saw in the last second is a tricky concept to
// define: If we saw two blocks, one 2 MB block transferred in 0.2 seconds,
// and one 4 MB block transferred in 0.2 seconds, then the maximum rate
// is 20 MB/s (the 4 MB block), the average of the two blocks is 15 MB/s,
// and the aggregate rate is 6 MB/s (total of 6 MB transferred in one
// second). As a first cut, I'm taking the definition to be the maximum
// of aggregate or of any single block's rate (so in the example case it's
// 6 MB/s).
long aggregateTransferRate = bytesInLastSecond;
long maxObservedTransferRate =
Math.max(aggregateTransferRate, maxSingleBlockTransferRate);
updateBytesTransferRate(updateWrite, maxObservedTransferRate);
}
/**
* A single block transfer.
*/
private static final class BlockTransferWindow {
private final Date startDate;
private final Date endDate;
private final long bytesTransferred;
public BlockTransferWindow(Date startDate, Date endDate,
long bytesTransferred) {
this.startDate = startDate;
this.endDate = endDate;
this.bytesTransferred = bytesTransferred;
}
public Date getStartDate() { return startDate; }
public Date getEndDate() { return endDate; }
public long getBytesTransferred() { return bytesTransferred; }
}
/**
* The auto-update thread.
*/
private final class UploadBandwidthUpdater implements Runnable {
@Override
public void run() {
try {
while (true) {
Thread.sleep(windowSizeMs);
if (!suppressAutoUpdate) {
triggerUpdate(true);
triggerUpdate(false);
}
}
} catch (InterruptedException e) {
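        // Interrupted, typically by close(); exit the update loop.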
}
}
}
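  /**
   * Stops the auto-update thread, if one was started, and waits for it
   * to terminate.
   */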
public void close() {
if (uploadBandwidthUpdater != null) {
// Interrupt and join the updater thread in death.
uploadBandwidthUpdater.interrupt();
try {
uploadBandwidthUpdater.join();
} catch (InterruptedException e) {
}
uploadBandwidthUpdater = null;
}
}
}
| 10,906 | 36.740484 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.io.ByteArrayOutputStream;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.StringTokenizer;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.HarFileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.JarFinder;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Assert;
import static org.junit.Assert.*;
import org.junit.Before;
import org.junit.Test;
/**
* test {@link HadoopArchives}
*/
public class TestHadoopArchives {
public static final String HADOOP_ARCHIVES_JAR = JarFinder
.getJar(HadoopArchives.class);
{
((Log4JLogger) LogFactory.getLog(org.apache.hadoop.security.Groups.class))
.getLogger().setLevel(Level.ERROR);
}
private static final String inputDir = "input";
private Path inputPath;
private Path archivePath;
private final List<String> fileList = new ArrayList<String>();
private MiniDFSCluster dfscluster;
private Configuration conf;
private FileSystem fs;
private static String createFile(Path root, FileSystem fs, String... dirsAndFile
) throws IOException {
String fileBaseName = dirsAndFile[dirsAndFile.length - 1];
return createFile(root, fs, fileBaseName.getBytes("UTF-8"), dirsAndFile);
}
private static String createFile(Path root, FileSystem fs, byte[] fileContent, String... dirsAndFile
) throws IOException {
StringBuilder sb = new StringBuilder();
for (String segment: dirsAndFile) {
if (sb.length() > 0) {
sb.append(Path.SEPARATOR);
}
sb.append(segment);
}
final Path f = new Path(root, sb.toString());
final FSDataOutputStream out = fs.create(f);
try {
out.write(fileContent);
} finally {
out.close();
}
return sb.toString();
}
@Before
public void setUp() throws Exception {
conf = new Configuration();
conf.set(CapacitySchedulerConfiguration.PREFIX
+ CapacitySchedulerConfiguration.ROOT + "."
+ CapacitySchedulerConfiguration.QUEUES, "default");
conf.set(CapacitySchedulerConfiguration.PREFIX
+ CapacitySchedulerConfiguration.ROOT + ".default."
+ CapacitySchedulerConfiguration.CAPACITY, "100");
dfscluster =
new MiniDFSCluster.Builder(conf).checkExitOnShutdown(true)
.numDataNodes(3).format(true).racks(null).build();
fs = dfscluster.getFileSystem();
// prepare archive path:
archivePath = new Path(fs.getHomeDirectory(), "archive");
fs.delete(archivePath, true);
// prepare input path:
inputPath = new Path(fs.getHomeDirectory(), inputDir);
fs.delete(inputPath, true);
fs.mkdirs(inputPath);
// create basic input files:
fileList.add(createFile(inputPath, fs, "a"));
fileList.add(createFile(inputPath, fs, "b"));
fileList.add(createFile(inputPath, fs, "c"));
}
@After
public void tearDown() throws Exception {
if (dfscluster != null) {
dfscluster.shutdown();
}
}
@Test
public void testRelativePath() throws Exception {
final Path sub1 = new Path(inputPath, "dir1");
fs.mkdirs(sub1);
createFile(inputPath, fs, sub1.getName(), "a");
final FsShell shell = new FsShell(conf);
final List<String> originalPaths = lsr(shell, "input");
System.out.println("originalPaths: " + originalPaths);
// make the archive:
final String fullHarPathStr = makeArchive();
// compare results:
final List<String> harPaths = lsr(shell, fullHarPathStr);
Assert.assertEquals(originalPaths, harPaths);
}
@Test
  public void testRelativePathWithRepl() throws Exception {
final Path sub1 = new Path(inputPath, "dir1");
fs.mkdirs(sub1);
createFile(inputPath, fs, sub1.getName(), "a");
final FsShell shell = new FsShell(conf);
final List<String> originalPaths = lsr(shell, "input");
System.out.println("originalPaths: " + originalPaths);
// make the archive:
final String fullHarPathStr = makeArchiveWithRepl();
// compare results:
final List<String> harPaths = lsr(shell, fullHarPathStr);
Assert.assertEquals(originalPaths, harPaths);
}
@Test
public void testOutputPathValidity() throws Exception {
final String inputPathStr = inputPath.toUri().getPath();
final URI uri = fs.getUri();
final String harName = "foo.har";
System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH,
HADOOP_ARCHIVES_JAR);
final HadoopArchives har = new HadoopArchives(conf);
PrintStream stderr = System.err;
ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
PrintStream newErr = new PrintStream(byteStream);
System.setErr(newErr);
// fail if the archive path already exists
createFile(archivePath, fs, harName);
final String[] args = { "-archiveName", harName, "-p", inputPathStr, "*",
archivePath.toString() };
Assert.assertEquals(-1, ToolRunner.run(har, args));
String output = byteStream.toString();
final Path outputPath = new Path(archivePath, harName);
Assert.assertTrue(output.indexOf("Archive path: " + outputPath.toString()
+ " already exists") != -1);
byteStream.reset();
// fail if the destination directory is a file
createFile(archivePath, fs, "sub1");
final Path archivePath2 = new Path(archivePath, "sub1");
final String[] args2 = { "-archiveName", harName, "-p", inputPathStr, "*",
archivePath2.toString() };
Assert.assertEquals(-1, ToolRunner.run(har, args2));
output = byteStream.toString();
Assert.assertTrue(output.indexOf("Destination " + archivePath2.toString()
+ " should be a directory but is a file") != -1);
System.setErr(stderr);
}
@Test
public void testPathWithSpaces() throws Exception {
// create files/directories with spaces
createFile(inputPath, fs, "c c");
final Path sub1 = new Path(inputPath, "sub 1");
fs.mkdirs(sub1);
createFile(sub1, fs, "file x y z");
createFile(sub1, fs, "file");
createFile(sub1, fs, "x");
createFile(sub1, fs, "y");
createFile(sub1, fs, "z");
final Path sub2 = new Path(inputPath, "sub 1 with suffix");
fs.mkdirs(sub2);
createFile(sub2, fs, "z");
final FsShell shell = new FsShell(conf);
final String inputPathStr = inputPath.toUri().getPath();
final List<String> originalPaths = lsr(shell, inputPathStr);
// make the archive:
final String fullHarPathStr = makeArchive();
// compare results
final List<String> harPaths = lsr(shell, fullHarPathStr);
Assert.assertEquals(originalPaths, harPaths);
}
@Test
public void testSingleFile() throws Exception {
final Path sub1 = new Path(inputPath, "dir1");
fs.mkdirs(sub1);
String singleFileName = "a";
createFile(inputPath, fs, sub1.getName(), singleFileName);
final FsShell shell = new FsShell(conf);
final List<String> originalPaths = lsr(shell, sub1.toString());
System.out.println("originalPaths: " + originalPaths);
// make the archive:
final String fullHarPathStr = makeArchive(sub1, singleFileName);
// compare results:
final List<String> harPaths = lsr(shell, fullHarPathStr);
Assert.assertEquals(originalPaths, harPaths);
}
@Test
public void testGlobFiles() throws Exception {
final Path sub1 = new Path(inputPath, "dir1");
final Path sub2 = new Path(inputPath, "dir2");
fs.mkdirs(sub1);
String fileName = "a";
createFile(inputPath, fs, sub1.getName(), fileName);
createFile(inputPath, fs, sub2.getName(), fileName);
createFile(inputPath, fs, sub1.getName(), "b"); // not part of result
final String glob = "dir{1,2}/a";
final FsShell shell = new FsShell(conf);
final List<String> originalPaths = lsr(shell, inputPath.toString(),
inputPath + "/" + glob);
System.out.println("originalPaths: " + originalPaths);
// make the archive:
final String fullHarPathStr = makeArchive(inputPath, glob);
// compare results:
final List<String> harPaths = lsr(shell, fullHarPathStr,
fullHarPathStr + "/" + glob);
Assert.assertEquals(originalPaths, harPaths);
}
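  /*
   * Runs "-lsr" through FsShell on the given directory (or glob) and returns
   * the sorted list of output paths relative to the last component of the
   * root directory.
   */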
private static List<String> lsr(final FsShell shell, String rootDir) throws Exception {
return lsr(shell, rootDir, null);
}
private static List<String> lsr(final FsShell shell, String rootDir,
String glob) throws Exception {
final String dir = glob == null ? rootDir : glob;
System.out.println("lsr root=" + rootDir);
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final PrintStream out = new PrintStream(bytes);
final PrintStream oldOut = System.out;
final PrintStream oldErr = System.err;
System.setOut(out);
System.setErr(out);
final String results;
try {
Assert.assertEquals(0, shell.run(new String[] { "-lsr", dir }));
results = bytes.toString();
} finally {
IOUtils.closeStream(out);
System.setOut(oldOut);
System.setErr(oldErr);
}
System.out.println("lsr results:\n" + results);
String dirname = rootDir;
if (rootDir.lastIndexOf(Path.SEPARATOR) != -1) {
dirname = rootDir.substring(rootDir.lastIndexOf(Path.SEPARATOR));
}
final List<String> paths = new ArrayList<String>();
for (StringTokenizer t = new StringTokenizer(results, "\n"); t
.hasMoreTokens();) {
final String s = t.nextToken();
final int i = s.indexOf(dirname);
if (i >= 0) {
paths.add(s.substring(i + dirname.length()));
}
}
Collections.sort(paths);
System.out
.println("lsr paths = " + paths.toString().replace(", ", ",\n "));
return paths;
}
@Test
public void testReadFileContent() throws Exception {
fileList.add(createFile(inputPath, fs, "c c"));
final Path sub1 = new Path(inputPath, "sub 1");
fs.mkdirs(sub1);
fileList.add(createFile(inputPath, fs, sub1.getName(), "file x y z"));
fileList.add(createFile(inputPath, fs, sub1.getName(), "file"));
fileList.add(createFile(inputPath, fs, sub1.getName(), "x"));
fileList.add(createFile(inputPath, fs, sub1.getName(), "y"));
fileList.add(createFile(inputPath, fs, sub1.getName(), "z"));
final Path sub2 = new Path(inputPath, "sub 1 with suffix");
fs.mkdirs(sub2);
fileList.add(createFile(inputPath, fs, sub2.getName(), "z"));
// Generate a big binary file content:
final byte[] binContent = prepareBin();
fileList.add(createFile(inputPath, fs, binContent, sub2.getName(), "bin"));
fileList.add(createFile(inputPath, fs, new byte[0], sub2.getName(), "zero-length"));
final String fullHarPathStr = makeArchive();
// Create fresh HarFs:
final HarFileSystem harFileSystem = new HarFileSystem(fs);
try {
final URI harUri = new URI(fullHarPathStr);
harFileSystem.initialize(harUri, fs.getConf());
// now read the file content and compare it against the expected:
int readFileCount = 0;
for (final String pathStr0 : fileList) {
final Path path = new Path(fullHarPathStr + Path.SEPARATOR + pathStr0);
final String baseName = path.getName();
final FileStatus status = harFileSystem.getFileStatus(path);
if (status.isFile()) {
// read the file:
final byte[] actualContentSimple = readAllSimple(
harFileSystem.open(path), true);
final byte[] actualContentBuffer = readAllWithBuffer(
harFileSystem.open(path), true);
assertArrayEquals(actualContentSimple, actualContentBuffer);
final byte[] actualContentFully = readAllWithReadFully(
actualContentSimple.length,
harFileSystem.open(path), true);
assertArrayEquals(actualContentSimple, actualContentFully);
final byte[] actualContentSeek = readAllWithSeek(
actualContentSimple.length,
harFileSystem.open(path), true);
assertArrayEquals(actualContentSimple, actualContentSeek);
final byte[] actualContentRead4
= readAllWithRead4(harFileSystem.open(path), true);
assertArrayEquals(actualContentSimple, actualContentRead4);
final byte[] actualContentSkip = readAllWithSkip(
actualContentSimple.length,
harFileSystem.open(path),
harFileSystem.open(path),
true);
assertArrayEquals(actualContentSimple, actualContentSkip);
if ("bin".equals(baseName)) {
assertArrayEquals(binContent, actualContentSimple);
} else if ("zero-length".equals(baseName)) {
assertEquals(0, actualContentSimple.length);
} else {
String actual = new String(actualContentSimple, "UTF-8");
assertEquals(baseName, actual);
}
readFileCount++;
}
}
assertEquals(fileList.size(), readFileCount);
} finally {
harFileSystem.close();
}
}
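  /*
   * Reads the whole stream one byte at a time via read().
   */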
private static byte[] readAllSimple(FSDataInputStream fsdis, boolean close) throws IOException {
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
try {
int b;
while (true) {
b = fsdis.read();
if (b < 0) {
break;
} else {
baos.write(b);
}
}
baos.close();
return baos.toByteArray();
} finally {
if (close) {
fsdis.close();
}
}
}
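  /*
   * Reads the whole stream via read(byte[], int, int), sizing the buffer
   * from available() when that value is non-negative.
   */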
private static byte[] readAllWithBuffer(FSDataInputStream fsdis, boolean close)
throws IOException {
try {
final int available = fsdis.available();
final byte[] buffer;
final ByteArrayOutputStream baos;
if (available < 0) {
buffer = new byte[1024];
baos = new ByteArrayOutputStream(buffer.length * 2);
} else {
buffer = new byte[available];
baos = new ByteArrayOutputStream(available);
}
int readIntoBuffer = 0;
int read;
while (true) {
read = fsdis.read(buffer, readIntoBuffer, buffer.length - readIntoBuffer);
if (read < 0) {
// end of stream:
if (readIntoBuffer > 0) {
baos.write(buffer, 0, readIntoBuffer);
}
return baos.toByteArray();
} else {
readIntoBuffer += read;
if (readIntoBuffer == buffer.length) {
// buffer is full, need to clean the buffer.
// drop the buffered data to baos:
baos.write(buffer);
// reset the counter to start reading to the buffer beginning:
readIntoBuffer = 0;
} else if (readIntoBuffer > buffer.length) {
throw new IOException("Read more than the buffer length: "
+ readIntoBuffer + ", buffer length = " + buffer.length);
}
}
}
} finally {
if (close) {
fsdis.close();
}
}
}
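  /*
   * Reads the whole stream through the positional readFully() overloads in
   * fixed-size chunks, and verifies that reading past EOF fails.
   */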
private static byte[] readAllWithReadFully(int totalLength, FSDataInputStream fsdis, boolean close)
throws IOException {
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
// Simulate reading of some data structures of known length:
final byte[] buffer = new byte[17];
final int times = totalLength / buffer.length;
final int remainder = totalLength % buffer.length;
// it would be simpler to leave the position tracking to the
// InputStream, but we need to check the methods #readFully(2)
// and #readFully(4) that receive the position as a parameter:
int position = 0;
try {
// read "data structures":
for (int i=0; i<times; i++) {
fsdis.readFully(position, buffer);
position += buffer.length;
baos.write(buffer);
}
if (remainder > 0) {
// read the remainder:
fsdis.readFully(position, buffer, 0, remainder);
position += remainder;
baos.write(buffer, 0, remainder);
}
try {
fsdis.readFully(position, buffer, 0, 1);
        fail("readFully past EOF should have thrown an IOException.");
} catch (IOException ioe) {
// okay
}
assertEquals(totalLength, position);
final byte[] result = baos.toByteArray();
assertEquals(totalLength, result.length);
return result;
} finally {
if (close) {
fsdis.close();
}
}
}
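  /*
   * Reads the whole stream through the positional
   * read(long, byte[], int, int) overload.
   */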
private static byte[] readAllWithRead4(FSDataInputStream fsdis, boolean close)
throws IOException {
try {
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
final byte[] buffer = new byte[17];
int totalRead = 0;
int read;
while (true) {
read = fsdis.read(totalRead, buffer, 0, buffer.length);
if (read > 0) {
totalRead += read;
baos.write(buffer, 0, read);
} else if (read < 0) {
break; // EOF
} else {
// read == 0:
// zero result may be returned *only* in case if the 4th
// parameter is 0. Since in our case this is 'buffer.length',
// zero return value clearly indicates a bug:
throw new AssertionError("FSDataInputStream#read(4) returned 0, while " +
" the 4th method parameter is " + buffer.length + ".");
}
}
final byte[] result = baos.toByteArray();
return result;
} finally {
if (close) {
fsdis.close();
}
}
}
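  /*
   * Reads the stream backwards by seeking to each chunk boundary, then
   * checks that seeking to invalid positions raises IOException.
   */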
private static byte[] readAllWithSeek(final int totalLength,
final FSDataInputStream fsdis, final boolean close)
throws IOException {
final byte[] result = new byte[totalLength];
long pos;
try {
// read the data in the reverse order, from
// the tail to the head by pieces of 'buffer' length:
final byte[] buffer = new byte[17];
final int times = totalLength / buffer.length;
int read;
int expectedRead;
for (int i=times; i>=0; i--) {
pos = i * buffer.length;
fsdis.seek(pos);
// check that seek is successful:
assertEquals(pos, fsdis.getPos());
read = fsdis.read(buffer);
// check we read right number of bytes:
if (i == times) {
expectedRead = totalLength % buffer.length; // remainder
if (expectedRead == 0) {
// zero remainder corresponds to the EOS, so
            // by the contract of DataInputStream#read(byte[]) -1 should be
// returned:
expectedRead = -1;
}
} else {
expectedRead = buffer.length;
}
assertEquals(expectedRead, read);
if (read > 0) {
System.arraycopy(buffer, 0, result, (int)pos, read);
}
}
      // finally, check that #seek() to a non-existing position leads to IOE:
expectSeekIOE(fsdis, Long.MAX_VALUE, "Seek to Long.MAX_VALUE should lead to IOE.");
expectSeekIOE(fsdis, Long.MIN_VALUE, "Seek to Long.MIN_VALUE should lead to IOE.");
long pp = -1L;
expectSeekIOE(fsdis, pp, "Seek to "+pp+" should lead to IOE.");
      // NB: it is *possible* to #seek(length), but *impossible* to #seek(length + 1):
fsdis.seek(totalLength);
assertEquals(totalLength, fsdis.getPos());
pp = totalLength + 1;
expectSeekIOE(fsdis, pp, "Seek to the length position + 1 ("+pp+") should lead to IOE.");
return result;
} finally {
if (close) {
fsdis.close();
}
}
}
private static void expectSeekIOE(FSDataInputStream fsdis, long seekPos, String message) {
try {
fsdis.seek(seekPos);
      fail(message + " (Position = " + fsdis.getPos() + ")");
} catch (IOException ioe) {
// okay
}
}
  /*
   * Reads data in chunks from 2 input streams:
   * reads a chunk from stream 1 and skips that chunk in stream 2,
   * then reads the next chunk from stream 2 and skips that chunk in stream 1.
   */
private static byte[] readAllWithSkip(
final int totalLength,
final FSDataInputStream fsdis1,
final FSDataInputStream fsdis2,
final boolean close)
throws IOException {
// test negative skip arg:
assertEquals(0, fsdis1.skip(-1));
// test zero skip arg:
assertEquals(0, fsdis1.skip(0));
final ByteArrayOutputStream baos = new ByteArrayOutputStream(totalLength);
try {
      // read the data forward in pieces of 'buffer' length,
      // alternating between the two streams:
final byte[] buffer = new byte[17];
final int times = totalLength / buffer.length;
final int remainder = totalLength % buffer.length;
long skipped;
long expectedPosition;
int toGo;
for (int i=0; i<=times; i++) {
toGo = (i < times) ? buffer.length : remainder;
if (i % 2 == 0) {
fsdis1.readFully(buffer, 0, toGo);
skipped = skipUntilZero(fsdis2, toGo);
} else {
fsdis2.readFully(buffer, 0, toGo);
skipped = skipUntilZero(fsdis1, toGo);
}
if (i < times) {
assertEquals(buffer.length, skipped);
expectedPosition = (i + 1) * buffer.length;
} else {
// remainder:
if (remainder > 0) {
assertEquals(remainder, skipped);
} else {
assertEquals(0, skipped);
}
expectedPosition = totalLength;
}
// check if the 2 streams have equal and correct positions:
assertEquals(expectedPosition, fsdis1.getPos());
assertEquals(expectedPosition, fsdis2.getPos());
// save the read data:
if (toGo > 0) {
baos.write(buffer, 0, toGo);
}
}
      // finally, check that a stream at EOF cannot skip:
assertEquals(0, fsdis1.skip(-1));
assertEquals(0, fsdis1.skip(0));
assertEquals(0, fsdis1.skip(1));
assertEquals(0, fsdis1.skip(Long.MAX_VALUE));
return baos.toByteArray();
} finally {
if (close) {
fsdis1.close();
fsdis2.close();
}
}
}
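  /*
   * Calls skip() repeatedly until the requested number of bytes has been
   * skipped or skip() returns 0 (EOF or unable to skip further).
   */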
private static long skipUntilZero(final FilterInputStream fis,
final long toSkip) throws IOException {
long skipped = 0;
long remainsToSkip = toSkip;
long s;
while (skipped < toSkip) {
      s = fis.skip(remainsToSkip); // actually skipped
if (s == 0) {
return skipped; // EOF or impossible to skip.
}
skipped += s;
remainsToSkip -= s;
}
return skipped;
}
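  /*
   * Generates a deterministic binary payload used as file content in the
   * archive tests.
   */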
private static byte[] prepareBin() {
byte[] bb = new byte[77777];
for (int i=0; i<bb.length; i++) {
// Generate unique values, as possible:
double d = Math.log(i + 2);
long bits = Double.doubleToLongBits(d);
bb[i] = (byte)bits;
}
return bb;
}
private String makeArchive() throws Exception {
return makeArchive(inputPath, null);
}
/*
* Run the HadoopArchives tool to create an archive on the
* given file system.
*/
private String makeArchive(Path parentPath, String relGlob) throws Exception {
final String parentPathStr = parentPath.toUri().getPath();
final String relPathGlob = relGlob == null ? "*" : relGlob;
System.out.println("parentPathStr = " + parentPathStr);
final URI uri = fs.getUri();
final String prefix = "har://hdfs-" + uri.getHost() + ":" + uri.getPort()
+ archivePath.toUri().getPath() + Path.SEPARATOR;
final String harName = "foo.har";
final String fullHarPathStr = prefix + harName;
final String[] args = { "-archiveName", harName, "-p", parentPathStr,
relPathGlob, archivePath.toString() };
System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH,
HADOOP_ARCHIVES_JAR);
final HadoopArchives har = new HadoopArchives(conf);
assertEquals(0, ToolRunner.run(har, args));
return fullHarPathStr;
}
/*
* Run the HadoopArchives tool to create an archive on the
* given file system with a specified replication degree.
*/
private String makeArchiveWithRepl() throws Exception {
final String inputPathStr = inputPath.toUri().getPath();
System.out.println("inputPathStr = " + inputPathStr);
final URI uri = fs.getUri();
final String prefix = "har://hdfs-" + uri.getHost() + ":" + uri.getPort()
+ archivePath.toUri().getPath() + Path.SEPARATOR;
final String harName = "foo.har";
final String fullHarPathStr = prefix + harName;
final String[] args =
{ "-archiveName", harName, "-p", inputPathStr, "-r", "2", "*",
archivePath.toString() };
System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH,
HADOOP_ARCHIVES_JAR);
final HadoopArchives har = new HadoopArchives(conf);
assertEquals(0, ToolRunner.run(har, args));
RemoteIterator<LocatedFileStatus> listFiles =
fs.listFiles(new Path(archivePath.toString() + "/" + harName), false);
while (listFiles.hasNext()) {
LocatedFileStatus next = listFiles.next();
if (!next.getPath().toString().endsWith("_SUCCESS")) {
assertEquals(next.getPath().toString(), 2, next.getReplication());
}
}
return fullHarPathStr;
}
  /*
   * Tests copying from the archive file system to a local file system.
   */
  @Test
  public void testCopyToLocal() throws Exception {
final String fullHarPathStr = makeArchive();
// make path to copy the file to:
final String tmpDir
= System.getProperty("test.build.data","build/test/data") + "/work-dir/har-fs-tmp";
final Path tmpPath = new Path(tmpDir);
final LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
localFs.delete(tmpPath, true);
localFs.mkdirs(tmpPath);
assertTrue(localFs.exists(tmpPath));
// Create fresh HarFs:
final HarFileSystem harFileSystem = new HarFileSystem(fs);
try {
final URI harUri = new URI(fullHarPathStr);
harFileSystem.initialize(harUri, fs.getConf());
final Path sourcePath = new Path(fullHarPathStr + Path.SEPARATOR + "a");
final Path targetPath = new Path(tmpPath, "straus");
// copy the Har file to a local file system:
harFileSystem.copyToLocalFile(false, sourcePath, targetPath);
FileStatus straus = localFs.getFileStatus(targetPath);
// the file should contain just 1 character:
assertEquals(1, straus.getLen());
} finally {
harFileSystem.close();
localFs.delete(tmpPath, true);
}
}
}
| 28,112 | 33.836431 | 103 |
java
|