repo (string, 1–191 chars, nullable) | file (string, 23–351 chars) | code (string, 0–5.32M chars) | file_length (int64, 0–5.32M) | avg_line_length (float64, 0–2.9k) | max_line_length (int64, 0–288k) | extension_type (string, 1 class: "java")
---|---|---|---|---|---|---
| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestJUnitSetup.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Assert;
import org.junit.Test;
public class TestJUnitSetup {
public static final Log LOG = LogFactory.getLog(TestJUnitSetup.class);
@Test
public void testJavaAssert() {
try {
assert false : "Good! Java assert is on.";
} catch(AssertionError ae) {
LOG.info("The AssertionError is expected.", ae);
return;
}
Assert.fail("Java assert does not work.");
}
}
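The check above only passes when the JVM runs with assertions enabled (the -ea flag), which the test build is expected to turn on. A minimal standalone sketch of the same check using the classic assignment-in-assert idiom; the class name is hypothetical:

// Run with "java -ea AssertCheck"; without -ea the assert (and its
// side effect) is skipped entirely, so "enabled" stays false.
public class AssertCheck {
  public static void main(String[] args) {
    boolean enabled = false;
    assert enabled = true; // the assignment evaluates to true, so it never fails
    System.out.println("Java assertions enabled: " + enabled);
  }
}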
| 1,340 | 32.525 | 75 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MockitoUtil.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import java.io.Closeable;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.mockito.stubbing.Stubber;
public abstract class MockitoUtil {
/**
* Return a mock object for an IPC protocol. This special
* method is necessary, since the IPC proxies have to implement
* Closeable in addition to their protocol interface.
* @param clazz the protocol class
*/
public static <T> T mockProtocol(Class<T> clazz) {
return Mockito.mock(clazz,
Mockito.withSettings().extraInterfaces(Closeable.class));
}
/**
* Throw an exception from the mock/spy only when the call stack at the
* time the method is invoked contains a frame matching the given
* pattern.
*
* @param t the Throwable to throw
* @param pattern the pattern against which to match the call stack trace
* @return the stub in progress
*/
public static Stubber doThrowWhenCallStackMatches(
final Throwable t, final String pattern) {
return Mockito.doAnswer(new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
t.setStackTrace(Thread.currentThread().getStackTrace());
for (StackTraceElement elem : t.getStackTrace()) {
if (elem.toString().matches(pattern)) {
throw t;
}
}
return invocation.callRealMethod();
}
});
}
}
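A brief usage sketch for the two helpers above, assuming a hypothetical MyProtocol interface and an illustrative stack-frame pattern:

import java.io.Closeable;
import java.io.IOException;
import org.mockito.Mockito;

public class MockitoUtilExample {
  // Hypothetical protocol interface standing in for a real IPC protocol.
  interface MyProtocol { String ping(); }
  static class MyProtocolImpl implements MyProtocol {
    @Override public String ping() { return "pong"; }
  }

  public static void main(String[] args) throws Exception {
    // The proxy also implements Closeable, as IPC proxies must;
    // a plain Mockito.mock(MyProtocol.class) could not be cast this way.
    MyProtocol proxy = MockitoUtil.mockProtocol(MyProtocol.class);
    ((Closeable) proxy).close();

    // Throw only when some frame of the call stack matches the
    // (illustrative) pattern; otherwise fall through to the real method.
    MyProtocol spy = Mockito.spy(new MyProtocolImpl());
    MockitoUtil.doThrowWhenCallStackMatches(
        new IOException("injected"), ".*retryLoop.*")
        .when(spy).ping();
    System.out.println(spy.ping()); // prints "pong" unless the pattern matches
  }
}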
| 2,287 | 34.2 | 75 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import java.util.HashSet;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Time;
/**
* A utility to easily test threaded/synchronized code.
* The utility works by letting you add worker threads to a test context
* object, and then kick them all off to stress-test your parallel code.
*
* Thread exceptions are propagated back to the runner, so the test can
* verify them.
*
* An example:
*
* <code>
* final AtomicInteger threadsRun = new AtomicInteger();
*
* TestContext ctx = new TestContext();
* // Add 3 threads to test.
* for (int i = 0; i < 3; i++) {
* ctx.addThread(new TestingThread(ctx) {
* @Override
* public void doWork() throws Exception {
* threadsRun.incrementAndGet();
* }
* });
* }
* ctx.startThreads();
* // Set a timeout period for threads to complete.
* ctx.waitFor(30000);
* assertEquals(3, threadsRun.get());
* </code>
*
* For repetitive actions, use the {@link MultithreadedTestUtil.RepeatingThread}
* instead.
*
* (More examples can be found in {@link TestMultithreadedTestUtil})
*/
public abstract class MultithreadedTestUtil {
public static final Log LOG =
LogFactory.getLog(MultithreadedTestUtil.class);
/**
* TestContext is used to set up the multithreaded test runner.
* It lets you add threads, run them, wait upon or stop them.
*/
public static class TestContext {
private Throwable err = null;
private boolean stopped = false;
private Set<TestingThread> testThreads = new HashSet<TestingThread>();
private Set<TestingThread> finishedThreads = new HashSet<TestingThread>();
/**
* Check if the context can run threads.
* It can't if it has been stopped or an error has occurred.
* @return true if it can run, false if it can't.
*/
public synchronized boolean shouldRun() {
return !stopped && err == null;
}
/**
* Add a thread to the context for running.
* Threads can be of type {@link MultithreadedTestUtil.TestingThread}
* or {@link MultithreadedTestUtil.RepeatingTestThread}
* or other custom derivatives of the former.
* @param t the thread to add for running.
*/
public void addThread(TestingThread t) {
testThreads.add(t);
}
/**
* Starts all test threads that have been added so far.
*/
public void startThreads() {
for (TestingThread t : testThreads) {
t.start();
}
}
/**
* Waits for threads to finish or error out.
* @param millis the number of milliseconds to wait
* for threads to complete.
* @throws Exception if one or more of the threads
* have thrown an error.
*/
public synchronized void waitFor(long millis) throws Exception {
long endTime = Time.now() + millis;
while (shouldRun() &&
finishedThreads.size() < testThreads.size()) {
long left = endTime - Time.now();
if (left <= 0) break;
checkException();
wait(left);
}
checkException();
}
/**
* Checks for thread exceptions, and if they've occurred
* throws them as RuntimeExceptions in a deferred manner.
*/
public synchronized void checkException() throws Exception {
if (err != null) {
throw new RuntimeException("Deferred", err);
}
}
/**
* Called by {@link MultithreadedTestUtil.TestingThread}s to signal
* a failed thread.
* @param t the thread that failed.
*/
public synchronized void threadFailed(Throwable t) {
if (err == null) err = t;
LOG.error("Failed!", err);
notify();
}
/**
* Called by {@link MultithreadedTestUtil.TestingThread}s to signal
* a successful completion.
* @param t the thread that finished.
*/
public synchronized void threadDone(TestingThread t) {
finishedThreads.add(t);
notify();
}
/**
* Returns after stopping all threads and joining them back.
* @throws Exception in case a thread terminated with a failure.
*/
public void stop() throws Exception {
synchronized (this) {
stopped = true;
}
for (TestingThread t : testThreads) {
t.join();
}
checkException();
}
public Iterable<? extends Thread> getTestThreads() {
return testThreads;
}
}
/**
* A thread that can be added to a test context, and properly
* passes exceptions through.
*/
public static abstract class TestingThread extends Thread {
protected final TestContext ctx;
protected boolean stopped;
public TestingThread(TestContext ctx) {
this.ctx = ctx;
}
@Override
public void run() {
try {
doWork();
} catch (Throwable t) {
ctx.threadFailed(t);
}
ctx.threadDone(this);
}
/**
* Override this with the code to be exercised by the test thread.
* @throws Exception throw an exception if a failure has occurred.
*/
public abstract void doWork() throws Exception;
protected void stopTestThread() {
this.stopped = true;
}
}
/**
* A test thread that performs a repeating operation.
*/
public static abstract class RepeatingTestThread extends TestingThread {
public RepeatingTestThread(TestContext ctx) {
super(ctx);
}
/**
* Repeats a given user action until the context is asked to stop
* or an error occurs.
*/
@Override
public final void doWork() throws Exception {
while (ctx.shouldRun() && !stopped) {
doAnAction();
}
}
/**
* Override this with the action to be repeated by the test thread.
* @throws Exception throw an exception if a failure has occurred.
*/
public abstract void doAnAction() throws Exception;
}
}
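A sketch of the repeating variant described in the class Javadoc, written as the body of a hypothetical test method; the counter and sleep interval are illustrative:

final java.util.concurrent.atomic.AtomicLong ops =
    new java.util.concurrent.atomic.AtomicLong();
MultithreadedTestUtil.TestContext ctx =
    new MultithreadedTestUtil.TestContext();
ctx.addThread(new MultithreadedTestUtil.RepeatingTestThread(ctx) {
  @Override
  public void doAnAction() throws Exception {
    ops.incrementAndGet(); // repeated until the context stops or errors
  }
});
ctx.startThreads();
Thread.sleep(1000); // let the action repeat for a while
ctx.stop();         // joins the thread and rethrows any deferred failure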
| 6,740 | 27.93133 | 80 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/CoreTestDriver.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import org.apache.hadoop.io.TestArrayFile;
import org.apache.hadoop.io.TestSetFile;
import org.apache.hadoop.ipc.TestIPC;
import org.apache.hadoop.ipc.TestRPC;
import org.apache.hadoop.util.ProgramDriver;
/**
* Driver for core tests.
*/
public class CoreTestDriver {
private ProgramDriver pgd;
public CoreTestDriver() {
this(new ProgramDriver());
}
public CoreTestDriver(ProgramDriver pgd) {
this.pgd = pgd;
try {
pgd.addClass("testsetfile", TestSetFile.class,
"A test for flat files of binary key/value pairs.");
pgd.addClass("testarrayfile", TestArrayFile.class,
"A test for flat files of binary key/value pairs.");
pgd.addClass("testrpc", TestRPC.class, "A test for rpc.");
pgd.addClass("testipc", TestIPC.class, "A test for ipc.");
} catch(Throwable e) {
e.printStackTrace();
}
}
public void run(String argv[]) {
int exitCode = -1;
try {
exitCode = pgd.run(argv);
} catch(Throwable e) {
e.printStackTrace();
}
System.exit(exitCode);
}
public static void main(String argv[]){
new CoreTestDriver().run(argv);
}
}
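Since ProgramDriver dispatches on the first argument, additional tests can be registered before running; a sketch with a hypothetical MyTest class (which must provide a main(String[]) method):

ProgramDriver pgd = new ProgramDriver();
try {
  // Binds the command name "mytest" to the hypothetical MyTest class.
  pgd.addClass("mytest", MyTest.class, "A hypothetical test.");
} catch (Throwable e) {
  e.printStackTrace();
}
// Dispatches to MyTest.main(); note that run() calls System.exit().
new CoreTestDriver(pgd).run(new String[] { "mytest" });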
| 1,995 | 28.791045 | 75 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/UnitTestcaseTimeLimit.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import org.junit.Rule;
import org.junit.rules.TestRule;
import org.junit.rules.Timeout;
/**
* Base class for unit tests to extend so that each individual test is
* timed out and fails automatically should it run for more than 10 seconds.
* This provides an automatic regression check for tests that begin running
* longer than expected.
*/
public class UnitTestcaseTimeLimit {
public final int timeOutSecs = 10;
@Rule public TestRule globalTimeout = new Timeout(timeOutSecs * 1000);
}
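A sketch of how the rule is picked up; the subclass and test method are hypothetical:

// Every @Test in the subclass inherits the 10-second Timeout rule.
public class MyQuickTest extends UnitTestcaseTimeLimit {
  @org.junit.Test
  public void finishesInTime() throws Exception {
    Thread.sleep(100); // well under timeOutSecs, so this passes
  }
}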
| 1,343 | 37.4 | 76 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/jmx/TestJMXJsonServlet.java |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.jmx;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.http.HttpServerFunctionalTest;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static org.apache.hadoop.jmx.JMXJsonServlet.ACCESS_CONTROL_ALLOW_METHODS;
import static org.apache.hadoop.jmx.JMXJsonServlet.ACCESS_CONTROL_ALLOW_ORIGIN;
public class TestJMXJsonServlet extends HttpServerFunctionalTest {
private static HttpServer2 server;
private static URL baseUrl;
@BeforeClass public static void setup() throws Exception {
server = createTestServer();
server.start();
baseUrl = getServerURL(server);
}
@AfterClass public static void cleanup() throws Exception {
server.stop();
}
public static void assertReFind(String re, String value) {
Pattern p = Pattern.compile(re);
Matcher m = p.matcher(value);
assertTrue("'"+p+"' does not match "+value, m.find());
}
@Test public void testQuery() throws Exception {
String result = readOutput(new URL(baseUrl, "/jmx?qry=java.lang:type=Runtime"));
assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Runtime\"", result);
assertReFind("\"modelerType\"", result);
result = readOutput(new URL(baseUrl, "/jmx?qry=java.lang:type=Memory"));
assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result);
assertReFind("\"modelerType\"", result);
result = readOutput(new URL(baseUrl, "/jmx"));
assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result);
// test getting an attribute of an MBean
result = readOutput(new URL(baseUrl,
"/jmx?get=java.lang:type=Memory::HeapMemoryUsage"));
assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result);
assertReFind("\"committed\"\\s*:", result);
// negative test for getting an attribute of an MBean
result = readOutput(new URL(baseUrl,
"/jmx?get=java.lang:type=Memory::"));
assertReFind("\"ERROR\"", result);
// test CORS headers
HttpURLConnection conn = (HttpURLConnection)
new URL(baseUrl, "/jmx?qry=java.lang:type=Memory").openConnection();
assertEquals("GET", conn.getHeaderField(ACCESS_CONTROL_ALLOW_METHODS));
assertNotNull(conn.getHeaderField(ACCESS_CONTROL_ALLOW_ORIGIN));
}
}
| 3,223 | 36.929412 | 84 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics/TestMetricsServlet.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import junit.framework.TestCase;
import org.apache.hadoop.metrics.MetricsServlet.TagsMetricsPair;
import org.apache.hadoop.metrics.spi.NoEmitMetricsContext;
import org.apache.hadoop.metrics.spi.OutputRecord;
import org.mortbay.util.ajax.JSON;
public class TestMetricsServlet extends TestCase {
MetricsContext nc1;
MetricsContext nc2;
// List containing nc1 and nc2.
List<MetricsContext> contexts;
OutputRecord outputRecord;
/**
* Initializes, for testing, two NoEmitMetricsContexts, and adds one value
* to the first of them.
*/
@Override
public void setUp() throws IOException {
nc1 = new NoEmitMetricsContext();
nc1.init("test1", ContextFactory.getFactory());
nc2 = new NoEmitMetricsContext();
nc2.init("test2", ContextFactory.getFactory());
contexts = new ArrayList<MetricsContext>();
contexts.add(nc1);
contexts.add(nc2);
MetricsRecord r = nc1.createRecord("testRecord");
r.setTag("testTag1", "testTagValue1");
r.setTag("testTag2", "testTagValue2");
r.setMetric("testMetric1", 1);
r.setMetric("testMetric2", 33);
r.update();
Map<String, Collection<OutputRecord>> m = nc1.getAllRecords();
assertEquals(1, m.size());
assertEquals(1, m.values().size());
Collection<OutputRecord> outputRecords = m.values().iterator().next();
assertEquals(1, outputRecords.size());
outputRecord = outputRecords.iterator().next();
}
public void testTagsMetricsPair() throws IOException {
TagsMetricsPair pair = new TagsMetricsPair(outputRecord.getTagsCopy(),
outputRecord.getMetricsCopy());
String s = JSON.toString(pair);
assertEquals(
"[{\"testTag1\":\"testTagValue1\",\"testTag2\":\"testTagValue2\"},"+
"{\"testMetric1\":1,\"testMetric2\":33}]", s);
}
public void testGetMap() throws IOException {
MetricsServlet servlet = new MetricsServlet();
Map<String, Map<String, List<TagsMetricsPair>>> m = servlet.makeMap(contexts);
assertEquals("Map missing contexts", 2, m.size());
assertTrue(m.containsKey("test1"));
Map<String, List<TagsMetricsPair>> m2 = m.get("test1");
assertEquals("Missing records", 1, m2.size());
assertTrue(m2.containsKey("testRecord"));
assertEquals("Wrong number of tags-values pairs.", 1, m2.get("testRecord").size());
}
public void testPrintMap() throws IOException {
StringWriter sw = new StringWriter();
PrintWriter out = new PrintWriter(sw);
MetricsServlet servlet = new MetricsServlet();
servlet.printMap(out, servlet.makeMap(contexts));
String EXPECTED = "" +
"test1\n" +
" testRecord\n" +
" {testTag1=testTagValue1,testTag2=testTagValue2}:\n" +
" testMetric1=1\n" +
" testMetric2=33\n" +
"test2\n";
assertEquals(EXPECTED, sw.toString());
}
}
| 3,886 | 33.705357 | 87 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics/spi/TestOutputRecord.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics.spi;
import org.apache.hadoop.metrics.spi.AbstractMetricsContext.MetricMap;
import org.apache.hadoop.metrics.spi.AbstractMetricsContext.TagMap;
import junit.framework.TestCase;
public class TestOutputRecord extends TestCase {
public void testCopy() {
TagMap tags = new TagMap();
tags.put("tagkey", "tagval");
MetricMap metrics = new MetricMap();
metrics.put("metrickey", 123.4);
OutputRecord r = new OutputRecord(tags, metrics);
assertEquals(tags, r.getTagsCopy());
assertNotSame(tags, r.getTagsCopy());
assertEquals(metrics, r.getMetricsCopy());
assertNotSame(metrics, r.getMetricsCopy());
}
}
| 1,489 | 37.205128 | 75 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics/ganglia/TestGangliaContext.java |
/*
* TestGangliaContext.java
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics.ganglia;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.metrics.ContextFactory;
import org.apache.hadoop.metrics.spi.AbstractMetricsContext;
import java.net.MulticastSocket;
public class TestGangliaContext {
@Test
public void testShouldCreateDatagramSocketByDefault() throws Exception {
GangliaContext context = new GangliaContext();
context.init("gangliaContext", ContextFactory.getFactory());
assertFalse("Created MulticastSocket", context.datagramSocket instanceof MulticastSocket);
}
@Test
public void testShouldCreateDatagramSocketIfMulticastIsDisabled() throws Exception {
GangliaContext context = new GangliaContext();
ContextFactory factory = ContextFactory.getFactory();
factory.setAttribute("gangliaContext.multicast", "false");
context.init("gangliaContext", factory);
assertFalse("Created MulticastSocket", context.datagramSocket instanceof MulticastSocket);
}
@Test
public void testShouldCreateMulticastSocket() throws Exception {
GangliaContext context = new GangliaContext();
ContextFactory factory = ContextFactory.getFactory();
factory.setAttribute("gangliaContext.multicast", "true");
context.init("gangliaContext", factory);
assertTrue("Did not create MulticastSocket", context.datagramSocket instanceof MulticastSocket);
MulticastSocket multicastSocket = (MulticastSocket) context.datagramSocket;
assertEquals("Did not set default TTL", multicastSocket.getTimeToLive(), 1);
}
@Test
public void testShouldSetMulticastSocketTtl() throws Exception {
GangliaContext context = new GangliaContext();
ContextFactory factory = ContextFactory.getFactory();
factory.setAttribute("gangliaContext.multicast", "true");
factory.setAttribute("gangliaContext.multicast.ttl", "10");
context.init("gangliaContext", factory);
MulticastSocket multicastSocket = (MulticastSocket) context.datagramSocket;
assertEquals("Did not set TTL", multicastSocket.getTimeToLive(), 10);
}
@Test
public void testCloseShouldCloseTheSocketWhichIsCreatedByInit() throws Exception {
AbstractMetricsContext context=new GangliaContext();
context.init("gangliaContext", ContextFactory.getFactory());
GangliaContext gangliaContext =(GangliaContext) context;
assertFalse("Socket already closed",gangliaContext.datagramSocket.isClosed());
context.close();
assertTrue("Socket not closed",gangliaContext.datagramSocket.isClosed());
}
}
| 3,467 | 40.285714 | 100 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestFileSink.java |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.sink;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.regex.Pattern;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.annotation.Metric.Type;
import org.apache.hadoop.metrics2.impl.ConfigBuilder;
import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
import org.apache.hadoop.metrics2.impl.TestMetricsConfig;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
import org.junit.After;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestFileSink {
private File outFile;
// The 2 sample metric classes:
@Metrics(name="testRecord1", context="test1")
static class MyMetrics1 {
@Metric(value={"testTag1", ""}, type=Type.TAG)
String testTag1() { return "testTagValue1"; }
@Metric(value={"testTag2", ""}, type=Type.TAG)
String gettestTag2() { return "testTagValue2"; }
@Metric(value={"testMetric1", "An integer gauge"},always=true)
MutableGaugeInt testMetric1;
@Metric(value={"testMetric2", "An integer gauge"},always=true)
MutableGaugeInt testMetric2;
public MyMetrics1 registerWith(MetricsSystem ms) {
return ms.register("m1", null, this);
}
}
@Metrics(name="testRecord2", context="test1")
static class MyMetrics2 {
@Metric(value={"testTag22", ""}, type=Type.TAG)
String testTag1() { return "testTagValue22"; }
public MyMetrics2 registerWith(MetricsSystem ms) {
return ms.register("m2", null, this);
}
}
private File getTestTempFile(String prefix, String suffix) throws IOException {
String tmpPath = System.getProperty("java.io.tmpdir", "/tmp");
String user = System.getProperty("user.name", "unknown-user");
File dir = new File(tmpPath + "/" + user);
dir.mkdirs();
return File.createTempFile(prefix, suffix, dir);
}
@Test(timeout=6000)
public void testFileSink() throws IOException {
outFile = getTestTempFile("test-file-sink-", ".out");
final String outPath = outFile.getAbsolutePath();
// NB: specify large period to avoid multiple metrics snapshotting:
new ConfigBuilder().add("*.period", 10000)
.add("test.sink.mysink0.class", FileSink.class.getName())
.add("test.sink.mysink0.filename", outPath)
// NB: we filter by context to exclude "metricssystem" context metrics:
.add("test.sink.mysink0.context", "test1")
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("test");
ms.start();
final MyMetrics1 mm1
= new MyMetrics1().registerWith(ms);
new MyMetrics2().registerWith(ms);
mm1.testMetric1.incr();
mm1.testMetric2.incr(2);
ms.publishMetricsNow(); // publish the metrics
ms.stop();
ms.shutdown();
InputStream is = null;
ByteArrayOutputStream baos = null;
String outFileContent = null;
try {
is = new FileInputStream(outFile);
baos = new ByteArrayOutputStream((int)outFile.length());
IOUtils.copyBytes(is, baos, 1024, true);
outFileContent = new String(baos.toByteArray(), "UTF-8");
} finally {
IOUtils.cleanup(null, baos, is);
}
// Check the out file content. Should be something like the following:
//1360244820087 test1.testRecord1: Context=test1, testTag1=testTagValue1, testTag2=testTagValue2, Hostname=myhost, testMetric1=1, testMetric2=2
//1360244820089 test1.testRecord2: Context=test1, testTag22=testTagValue22, Hostname=myhost
// Note that in the below expression we allow tags and metrics to go in arbitrary order.
Pattern expectedContentPattern = Pattern.compile(
// line #1:
"^\\d+\\s+test1.testRecord1:\\s+Context=test1,\\s+" +
"(testTag1=testTagValue1,\\s+testTag2=testTagValue2|testTag2=testTagValue2,\\s+testTag1=testTagValue1)," +
"\\s+Hostname=.*,\\s+(testMetric1=1,\\s+testMetric2=2|testMetric2=2,\\s+testMetric1=1)" +
// line #2:
"$[\\n\\r]*^\\d+\\s+test1.testRecord2:\\s+Context=test1," +
"\\s+testTag22=testTagValue22,\\s+Hostname=.*$[\\n\\r]*",
Pattern.MULTILINE);
assertTrue(expectedContentPattern.matcher(outFileContent).matches());
}
@After
public void after() {
if (outFile != null) {
outFile.delete();
assertTrue(!outFile.exists());
}
}
}
| 5,441 | 36.273973 | 147 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/ganglia/TestGangliaSink.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.sink.ganglia;
import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.hadoop.metrics2.impl.ConfigBuilder;
import org.junit.Test;
import java.net.DatagramSocket;
import java.net.MulticastSocket;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class TestGangliaSink {
@Test
public void testShouldCreateDatagramSocketByDefault() throws Exception {
SubsetConfiguration conf = new ConfigBuilder()
.subset("test.sink.ganglia");
GangliaSink30 gangliaSink = new GangliaSink30();
gangliaSink.init(conf);
DatagramSocket socket = gangliaSink.getDatagramSocket();
assertFalse("Did not create DatagramSocket", socket == null || socket instanceof MulticastSocket);
}
@Test
public void testShouldCreateDatagramSocketIfMulticastIsDisabled() throws Exception {
SubsetConfiguration conf = new ConfigBuilder()
.add("test.sink.ganglia.multicast", false)
.subset("test.sink.ganglia");
GangliaSink30 gangliaSink = new GangliaSink30();
gangliaSink.init(conf);
DatagramSocket socket = gangliaSink.getDatagramSocket();
assertFalse("Did not create DatagramSocket", socket == null || socket instanceof MulticastSocket);
}
@Test
public void testShouldCreateMulticastSocket() throws Exception {
SubsetConfiguration conf = new ConfigBuilder()
.add("test.sink.ganglia.multicast", true)
.subset("test.sink.ganglia");
GangliaSink30 gangliaSink = new GangliaSink30();
gangliaSink.init(conf);
DatagramSocket socket = gangliaSink.getDatagramSocket();
assertTrue("Did not create MulticastSocket", socket != null && socket instanceof MulticastSocket);
int ttl = ((MulticastSocket) socket).getTimeToLive();
assertEquals("Did not set default TTL", 1, ttl);
}
@Test
public void testShouldSetMulticastSocketTtl() throws Exception {
SubsetConfiguration conf = new ConfigBuilder()
.add("test.sink.ganglia.multicast", true)
.add("test.sink.ganglia.multicast.ttl", 3)
.subset("test.sink.ganglia");
GangliaSink30 gangliaSink = new GangliaSink30();
gangliaSink.init(conf);
DatagramSocket socket = gangliaSink.getDatagramSocket();
assertTrue("Did not create MulticastSocket", socket != null && socket instanceof MulticastSocket);
int ttl = ((MulticastSocket) socket).getTimeToLive();
assertEquals("Did not set TTL", 3, ttl);
}
}
| 3,513 | 41.853659 | 106 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaMetricsTestHelper.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.sink.ganglia;
import java.net.DatagramSocket;
/**
* Helper class in the same package as ganglia sinks to be used by unit tests
*/
public class GangliaMetricsTestHelper {
/**
* Helper method to access the package-private method that sets the
* DatagramSocket needed for unit tests.
* @param gangliaSink
* @param datagramSocket
*/
public static void setDatagramSocket(AbstractGangliaSink gangliaSink,
DatagramSocket datagramSocket) {
gangliaSink.setDatagramSocket(datagramSocket);
}
}
| 1,350 | 32.775 | 77 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/filter/TestPatternFilter.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.filter;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.configuration.SubsetConfiguration;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import org.apache.hadoop.metrics2.MetricsFilter;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.impl.ConfigBuilder;
import static org.apache.hadoop.metrics2.lib.Interns.*;
public class TestPatternFilter {
/**
* Filters should default to accept
*/
@Test public void emptyConfigShouldAccept() {
SubsetConfiguration empty = new ConfigBuilder().subset("");
shouldAccept(empty, "anything");
shouldAccept(empty, Arrays.asList(tag("key", "desc", "value")));
shouldAccept(empty, mockMetricsRecord("anything", Arrays.asList(
tag("key", "desc", "value"))));
}
/**
* Filters should handle white-listing correctly
*/
@Test public void includeOnlyShouldOnlyIncludeMatched() {
SubsetConfiguration wl = new ConfigBuilder()
.add("p.include", "foo")
.add("p.include.tags", "foo:f").subset("p");
shouldAccept(wl, "foo");
shouldAccept(wl, Arrays.asList(tag("bar", "", ""),
tag("foo", "", "f")), new boolean[] {false, true});
shouldAccept(wl, mockMetricsRecord("foo", Arrays.asList(
tag("bar", "", ""), tag("foo", "", "f"))));
shouldReject(wl, "bar");
shouldReject(wl, Arrays.asList(tag("bar", "", "")));
shouldReject(wl, Arrays.asList(tag("foo", "", "boo")));
shouldReject(wl, mockMetricsRecord("bar", Arrays.asList(
tag("foo", "", "f"))));
shouldReject(wl, mockMetricsRecord("foo", Arrays.asList(
tag("bar", "", ""))));
}
/**
* Filters should handle black-listing correctly
*/
@Test public void excludeOnlyShouldOnlyExcludeMatched() {
SubsetConfiguration bl = new ConfigBuilder()
.add("p.exclude", "foo")
.add("p.exclude.tags", "foo:f").subset("p");
shouldAccept(bl, "bar");
shouldAccept(bl, Arrays.asList(tag("bar", "", "")));
shouldAccept(bl, mockMetricsRecord("bar", Arrays.asList(
tag("bar", "", ""))));
shouldReject(bl, "foo");
shouldReject(bl, Arrays.asList(tag("bar", "", ""),
tag("foo", "", "f")), new boolean[] {true, false});
shouldReject(bl, mockMetricsRecord("foo", Arrays.asList(
tag("bar", "", ""))));
shouldReject(bl, mockMetricsRecord("bar", Arrays.asList(
tag("bar", "", ""), tag("foo", "", "f"))));
}
/**
* Filters should accept unmatched items when both include and
* exclude patterns are present.
*/
@Test public void shouldAcceptUnmatchedWhenBothAreConfigured() {
SubsetConfiguration c = new ConfigBuilder()
.add("p.include", "foo")
.add("p.include.tags", "foo:f")
.add("p.exclude", "bar")
.add("p.exclude.tags", "bar:b").subset("p");
shouldAccept(c, "foo");
shouldAccept(c, Arrays.asList(tag("foo", "", "f")));
shouldAccept(c, mockMetricsRecord("foo", Arrays.asList(
tag("foo", "", "f"))));
shouldReject(c, "bar");
shouldReject(c, Arrays.asList(tag("bar", "", "b")));
shouldReject(c, mockMetricsRecord("bar", Arrays.asList(
tag("foo", "", "f"))));
shouldReject(c, mockMetricsRecord("foo", Arrays.asList(
tag("bar", "", "b"))));
shouldAccept(c, "foobar");
shouldAccept(c, Arrays.asList(tag("foobar", "", "")));
shouldAccept(c, mockMetricsRecord("foobar", Arrays.asList(
tag("foobar", "", ""))));
}
/**
* Include patterns should take precedence over exclude patterns
*/
@Test public void includeShouldOverrideExclude() {
SubsetConfiguration c = new ConfigBuilder()
.add("p.include", "foo")
.add("p.include.tags", "foo:f")
.add("p.exclude", "foo")
.add("p.exclude.tags", "foo:f").subset("p");
shouldAccept(c, "foo");
shouldAccept(c, Arrays.asList(tag("foo", "", "f")));
shouldAccept(c, mockMetricsRecord("foo", Arrays.asList(
tag("foo", "", "f"))));
}
static void shouldAccept(SubsetConfiguration conf, String s) {
assertTrue("accepts "+ s, newGlobFilter(conf).accepts(s));
assertTrue("accepts "+ s, newRegexFilter(conf).accepts(s));
}
// Version for one tag:
static void shouldAccept(SubsetConfiguration conf, List<MetricsTag> tags) {
shouldAcceptImpl(true, conf, tags, new boolean[] {true});
}
// Version for multiple tags:
static void shouldAccept(SubsetConfiguration conf, List<MetricsTag> tags,
boolean[] expectedAcceptedSpec) {
shouldAcceptImpl(true, conf, tags, expectedAcceptedSpec);
}
// Version for one tag:
static void shouldReject(SubsetConfiguration conf, List<MetricsTag> tags) {
shouldAcceptImpl(false, conf, tags, new boolean[] {false});
}
// Version for multiple tags:
static void shouldReject(SubsetConfiguration conf, List<MetricsTag> tags,
boolean[] expectedAcceptedSpec) {
shouldAcceptImpl(false, conf, tags, expectedAcceptedSpec);
}
private static void shouldAcceptImpl(final boolean expectAcceptList,
SubsetConfiguration conf, List<MetricsTag> tags, boolean[] expectedAcceptedSpec) {
final MetricsFilter globFilter = newGlobFilter(conf);
final MetricsFilter regexFilter = newRegexFilter(conf);
// Test acceptance of the tag list:
assertEquals("accepts "+ tags, expectAcceptList, globFilter.accepts(tags));
assertEquals("accepts "+ tags, expectAcceptList, regexFilter.accepts(tags));
// Test results on each of the individual tags:
int acceptedCount = 0;
for (int i=0; i<tags.size(); i++) {
MetricsTag tag = tags.get(i);
boolean actGlob = globFilter.accepts(tag);
boolean actRegex = regexFilter.accepts(tag);
assertEquals("accepts "+tag, expectedAcceptedSpec[i], actGlob);
// Both the filters should give the same result:
assertEquals(actGlob, actRegex);
if (actGlob) {
acceptedCount++;
}
}
if (expectAcceptList) {
// At least one individual tag should be accepted:
assertTrue("No tag of the following accepted: " + tags, acceptedCount > 0);
} else {
// At least one individual tag should be rejected:
assertTrue("No tag of the following rejected: " + tags, acceptedCount < tags.size());
}
}
/**
* Asserts that filters with the given configuration accept the given record.
*
* @param conf SubsetConfiguration containing filter configuration
* @param record MetricsRecord to check
*/
static void shouldAccept(SubsetConfiguration conf, MetricsRecord record) {
assertTrue("accepts " + record, newGlobFilter(conf).accepts(record));
assertTrue("accepts " + record, newRegexFilter(conf).accepts(record));
}
static void shouldReject(SubsetConfiguration conf, String s) {
assertTrue("rejects "+ s, !newGlobFilter(conf).accepts(s));
assertTrue("rejects "+ s, !newRegexFilter(conf).accepts(s));
}
/**
* Asserts that filters with the given configuration reject the given record.
*
* @param conf SubsetConfiguration containing filter configuration
* @param record MetricsRecord to check
*/
static void shouldReject(SubsetConfiguration conf, MetricsRecord record) {
assertTrue("rejects " + record, !newGlobFilter(conf).accepts(record));
assertTrue("rejects " + record, !newRegexFilter(conf).accepts(record));
}
/**
* Create a new glob filter with a config object
* @param conf the config object
* @return the filter
*/
public static GlobFilter newGlobFilter(SubsetConfiguration conf) {
GlobFilter f = new GlobFilter();
f.init(conf);
return f;
}
/**
* Create a new regex filter with a config object
* @param conf the config object
* @return the filter
*/
public static RegexFilter newRegexFilter(SubsetConfiguration conf) {
RegexFilter f = new RegexFilter();
f.init(conf);
return f;
}
/**
* Creates a mock MetricsRecord with the given name and tags.
*
* @param name String name
* @param tags List<MetricsTag> tags
* @return MetricsRecord newly created mock
*/
private static MetricsRecord mockMetricsRecord(String name,
List<MetricsTag> tags) {
MetricsRecord record = mock(MetricsRecord.class);
when(record.name()).thenReturn(name);
when(record.tags()).thenReturn(tags);
return record;
}
}
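A condensed sketch of the include-only behavior exercised above, written as the body of a hypothetical test method using the same helpers and configuration prefix:

SubsetConfiguration conf = new ConfigBuilder()
    .add("p.include", "foo")
    .subset("p");
GlobFilter filter = newGlobFilter(conf);
assertTrue(filter.accepts("foo"));  // matches the include pattern
assertFalse(filter.accepts("bar")); // with an include list, unmatched names are rejected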
| 9,306 | 36.079681 | 91 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/TestMetricsCache.java |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.util;
import java.util.Arrays;
import java.util.Collection;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsTag;
import static org.apache.hadoop.metrics2.lib.Interns.*;
public class TestMetricsCache {
private static final Log LOG = LogFactory.getLog(TestMetricsCache.class);
@SuppressWarnings("deprecation")
@Test public void testUpdate() {
MetricsCache cache = new MetricsCache();
MetricsRecord mr = makeRecord("r",
Arrays.asList(makeTag("t", "tv")),
Arrays.asList(makeMetric("m", 0), makeMetric("m1", 1)));
MetricsCache.Record cr = cache.update(mr);
verify(mr).name();
verify(mr).tags();
verify(mr).metrics();
assertEquals("same record size", cr.metrics().size(),
((Collection<AbstractMetric>)mr.metrics()).size());
assertEquals("same metric value", 0, cr.getMetric("m"));
MetricsRecord mr2 = makeRecord("r",
Arrays.asList(makeTag("t", "tv")),
Arrays.asList(makeMetric("m", 2), makeMetric("m2", 42)));
cr = cache.update(mr2);
assertEquals("contains 3 metric", 3, cr.metrics().size());
checkMetricValue("updated metric value", cr, "m", 2);
checkMetricValue("old metric value", cr, "m1", 1);
checkMetricValue("new metric value", cr, "m2", 42);
MetricsRecord mr3 = makeRecord("r",
Arrays.asList(makeTag("t", "tv3")), // different tag value
Arrays.asList(makeMetric("m3", 3)));
cr = cache.update(mr3); // should get a new record
assertEquals("contains 1 metric", 1, cr.metrics().size());
checkMetricValue("updated metric value", cr, "m3", 3);
// tags cache should be empty so far
assertEquals("no tags", 0, cr.tags().size());
// until now
cr = cache.update(mr3, true);
assertEquals("Got 1 tag", 1, cr.tags().size());
assertEquals("Tag value", "tv3", cr.getTag("t"));
checkMetricValue("Metric value", cr, "m3", 3);
}
@SuppressWarnings("deprecation")
@Test public void testGet() {
MetricsCache cache = new MetricsCache();
assertNull("empty", cache.get("r", Arrays.asList(makeTag("t", "t"))));
MetricsRecord mr = makeRecord("r",
Arrays.asList(makeTag("t", "t")),
Arrays.asList(makeMetric("m", 1)));
cache.update(mr);
MetricsCache.Record cr = cache.get("r", mr.tags());
LOG.debug("tags="+ mr.tags() +" cr="+ cr);
assertNotNull("Got record", cr);
assertEquals("contains 1 metric", 1, cr.metrics().size());
checkMetricValue("new metric value", cr, "m", 1);
}
/**
* Make sure metrics tag has a sane hashCode impl
*/
@Test public void testNullTag() {
MetricsCache cache = new MetricsCache();
MetricsRecord mr = makeRecord("r",
Arrays.asList(makeTag("t", null)),
Arrays.asList(makeMetric("m", 0), makeMetric("m1", 1)));
MetricsCache.Record cr = cache.update(mr);
assertTrue("t value should be null", null == cr.getTag("t"));
}
@Test public void testOverflow() {
MetricsCache cache = new MetricsCache();
MetricsCache.Record cr;
Collection<MetricsTag> t0 = Arrays.asList(makeTag("t0", "0"));
for (int i = 0; i < MetricsCache.MAX_RECS_PER_NAME_DEFAULT + 1; ++i) {
cr = cache.update(makeRecord("r",
Arrays.asList(makeTag("t"+ i, ""+ i)),
Arrays.asList(makeMetric("m", i))));
checkMetricValue("new metric value", cr, "m", i);
if (i < MetricsCache.MAX_RECS_PER_NAME_DEFAULT) {
assertNotNull("t0 is still there", cache.get("r", t0));
}
}
assertNull("t0 is gone", cache.get("r", t0));
}
private void checkMetricValue(String description, MetricsCache.Record cr,
String key, Number val) {
assertEquals(description, val, cr.getMetric(key));
assertNotNull("metric not null", cr.getMetricInstance(key));
assertEquals(description, val, cr.getMetricInstance(key).value());
}
private MetricsRecord makeRecord(String name, Collection<MetricsTag> tags,
Collection<AbstractMetric> metrics) {
MetricsRecord mr = mock(MetricsRecord.class);
when(mr.name()).thenReturn(name);
when(mr.tags()).thenReturn(tags);
when(mr.metrics()).thenReturn(metrics);
return mr;
}
private MetricsTag makeTag(String name, String value) {
return new MetricsTag(info(name, ""), value);
}
private AbstractMetric makeMetric(String name, Number value) {
AbstractMetric metric = mock(AbstractMetric.class);
when(metric.name()).thenReturn(name);
when(metric.value()).thenReturn(value);
return metric;
}
}
| 5,639 | 36.852349 | 76 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/TestSampleStat.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.util;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* Test the running sample stat computation
*/
public class TestSampleStat {
private static final double EPSILON = 1e-42;
/**
* Some simple use cases
*/
@Test public void testSimple() {
SampleStat stat = new SampleStat();
assertEquals("num samples", 0, stat.numSamples());
assertEquals("mean", 0.0, stat.mean(), EPSILON);
assertEquals("variance", 0.0, stat.variance(), EPSILON);
assertEquals("stddev", 0.0, stat.stddev(), EPSILON);
assertEquals("min", SampleStat.MinMax.DEFAULT_MIN_VALUE, stat.min(), EPSILON);
assertEquals("max", SampleStat.MinMax.DEFAULT_MAX_VALUE, stat.max(), EPSILON);
stat.add(3);
assertEquals("num samples", 1L, stat.numSamples());
assertEquals("mean", 3.0, stat.mean(), EPSILON);
assertEquals("variance", 0.0, stat.variance(), EPSILON);
assertEquals("stddev", 0.0, stat.stddev(), EPSILON);
assertEquals("min", 3.0, stat.min(), EPSILON);
assertEquals("max", 3.0, stat.max(), EPSILON);
stat.add(2).add(1);
assertEquals("num samples", 3L, stat.numSamples());
assertEquals("mean", 2.0, stat.mean(), EPSILON);
assertEquals("variance", 1.0, stat.variance(), EPSILON);
assertEquals("stddev", 1.0, stat.stddev(), EPSILON);
assertEquals("min", 1.0, stat.min(), EPSILON);
assertEquals("max", 3.0, stat.max(), EPSILON);
stat.reset();
assertEquals("num samples", 0, stat.numSamples());
assertEquals("mean", 0.0, stat.mean(), EPSILON);
assertEquals("variance", 0.0, stat.variance(), EPSILON);
assertEquals("stddev", 0.0, stat.stddev(), EPSILON);
assertEquals("min", SampleStat.MinMax.DEFAULT_MIN_VALUE, stat.min(), EPSILON);
assertEquals("max", SampleStat.MinMax.DEFAULT_MAX_VALUE, stat.max(), EPSILON);
}
}
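The expected values in the three-sample case follow from the standard formulas with the n-1 (sample) variance denominator; a quick check:

// For samples {3, 2, 1}:
double mean = (3 + 2 + 1) / 3.0;                 // 2.0
double variance = (Math.pow(3 - mean, 2)
    + Math.pow(2 - mean, 2)
    + Math.pow(1 - mean, 2)) / (3 - 1);          // 1.0
double stddev = Math.sqrt(variance);             // 1.0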
| 2,663 | 38.176471 | 82 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/TestSampleQuantiles.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.util;
import static org.junit.Assert.*;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.Random;
import org.junit.Before;
import org.junit.Test;
public class TestSampleQuantiles {
static final Quantile[] quantiles = { new Quantile(0.50, 0.050),
new Quantile(0.75, 0.025), new Quantile(0.90, 0.010),
new Quantile(0.95, 0.005), new Quantile(0.99, 0.001) };
SampleQuantiles estimator;
@Before
public void init() {
estimator = new SampleQuantiles(quantiles);
}
/**
* Check that the counts of the number of items in the window and sample are
* incremented correctly as items are added.
*/
@Test
public void testCount() throws IOException {
// Counts start off zero
assertEquals(estimator.getCount(), 0);
assertEquals(estimator.getSampleCount(), 0);
// Snapshot should be null if there are no entries.
assertNull(estimator.snapshot());
// Counts increment correctly by 1
estimator.insert(1337);
assertEquals(estimator.getCount(), 1);
estimator.snapshot();
assertEquals(estimator.getSampleCount(), 1);
assertEquals(
"50.00 %ile +/- 5.00%: 1337\n" +
"75.00 %ile +/- 2.50%: 1337\n" +
"90.00 %ile +/- 1.00%: 1337\n" +
"95.00 %ile +/- 0.50%: 1337\n" +
"99.00 %ile +/- 0.10%: 1337", estimator.toString());
}
/**
* Check that counts and quantile estimates are correctly reset after a call
* to {@link SampleQuantiles#clear()}.
*/
@Test
public void testClear() throws IOException {
for (int i = 0; i < 1000; i++) {
estimator.insert(i);
}
estimator.clear();
assertEquals(estimator.getCount(), 0);
assertEquals(estimator.getSampleCount(), 0);
assertNull(estimator.snapshot());
}
/**
* Correctness test that checks that the absolute error of the estimate is
* within the specified error bounds for some randomly permuted streams of items.
*/
@Test
public void testQuantileError() throws IOException {
final int count = 100000;
Random r = new Random(0xDEADDEAD);
Long[] values = new Long[count];
for (int i = 0; i < count; i++) {
values[i] = (long) (i + 1);
}
// Do 10 shuffle/insert/check cycles
for (int i = 0; i < 10; i++) {
System.out.println("Starting run " + i);
Collections.shuffle(Arrays.asList(values), r);
estimator.clear();
for (int j = 0; j < count; j++) {
estimator.insert(values[j]);
}
Map<Quantile, Long> snapshot;
snapshot = estimator.snapshot();
for (Quantile q : quantiles) {
long actual = (long) (q.quantile * count);
long error = (long) (q.error * count);
long estimate = snapshot.get(q);
System.out
.println(String.format("Expected %d with error %d, estimated %d",
actual, error, estimate));
assertTrue(estimate <= actual + error);
assertTrue(estimate >= actual - error);
}
}
}
}
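As the last test shows, the error term of a Quantile is a rank error, not a value error: over count items, an estimate for Quantile(q, e) must have a true rank within q*count +/- e*count. A small standalone sketch (the stream contents are illustrative):

// Track the median with a +/-5% rank error over the stream 1..1000.
SampleQuantiles est = new SampleQuantiles(
    new Quantile[] { new Quantile(0.50, 0.050) });
for (long v = 1; v <= 1000; v++) {
  est.insert(v);
}
java.util.Map<Quantile, Long> snapshot = est.snapshot();
// For this stream the median estimate must lie within 500 +/- 50.
System.out.println(snapshot);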
| 3,878 | 30.795082 | 79 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/MetricsRecords.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.impl;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsTag;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
/**
* Utility class mainly for tests
*/
public class MetricsRecords {
public static void assertTag(MetricsRecord record, String tagName,
String expectedValue) {
MetricsTag processIdTag = getFirstTagByName(record,
tagName);
assertNotNull(processIdTag);
assertEquals(expectedValue, processIdTag.value());
}
public static void assertMetric(MetricsRecord record,
String metricName,
Number expectedValue) {
AbstractMetric resourceLimitMetric = getFirstMetricByName(
record, metricName);
assertNotNull(resourceLimitMetric);
assertEquals(expectedValue, resourceLimitMetric.value());
}
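// A minimal usage sketch (illustrative only; "record" is assumed to have
// been captured from a sink or collector elsewhere):
//
//   MetricsRecords.assertTag(record, "Context", "test");
//   MetricsRecords.assertMetric(record, "NumActiveSinks", 3);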
private static MetricsTag getFirstTagByName(MetricsRecord record, String name) {
return Iterables.getFirst(Iterables.filter(record.tags(),
new MetricsTagPredicate(name)), null);
}
private static AbstractMetric getFirstMetricByName(
MetricsRecord record, String name) {
return Iterables.getFirst(
Iterables.filter(record.metrics(), new AbstractMetricPredicate(name)),
null);
}
private static class MetricsTagPredicate implements Predicate<MetricsTag> {
private String tagName;
public MetricsTagPredicate(String tagName) {
this.tagName = tagName;
}
@Override
public boolean apply(MetricsTag input) {
return input.name().equals(tagName);
}
}
private static class AbstractMetricPredicate
implements Predicate<AbstractMetric> {
private String metricName;
public AbstractMetricPredicate(
String metricName) {
this.metricName = metricName;
}
@Override
public boolean apply(AbstractMetric input) {
return input.name().equals(metricName);
}
}
}
| 2,906 | 30.258065 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsCollectorImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.impl;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.commons.configuration.SubsetConfiguration;
import static org.apache.hadoop.metrics2.filter.TestPatternFilter.*;
import static org.apache.hadoop.metrics2.lib.Interns.*;
public class TestMetricsCollectorImpl {
@Test public void recordBuilderShouldNoOpIfFiltered() {
SubsetConfiguration fc = new ConfigBuilder()
.add("p.exclude", "foo").subset("p");
MetricsCollectorImpl mb = new MetricsCollectorImpl();
mb.setRecordFilter(newGlobFilter(fc));
MetricsRecordBuilderImpl rb = mb.addRecord("foo");
rb.tag(info("foo", ""), "value").addGauge(info("g0", ""), 1);
assertEquals("no tags", 0, rb.tags().size());
assertEquals("no metrics", 0, rb.metrics().size());
assertNull("null record", rb.getRecord());
assertEquals("no records", 0, mb.getRecords().size());
}
@Test public void testPerMetricFiltering() {
SubsetConfiguration fc = new ConfigBuilder()
.add("p.exclude", "foo").subset("p");
MetricsCollectorImpl mb = new MetricsCollectorImpl();
mb.setMetricFilter(newGlobFilter(fc));
MetricsRecordBuilderImpl rb = mb.addRecord("foo");
rb.tag(info("foo", ""), "").addCounter(info("c0", ""), 0)
.addGauge(info("foo", ""), 1);
assertEquals("1 tag", 1, rb.tags().size());
assertEquals("1 metric", 1, rb.metrics().size());
assertEquals("expect foo tag", "foo", rb.tags().get(0).name());
assertEquals("expect c0", "c0", rb.metrics().get(0).name());
}
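// Filter semantics distilled from the two tests above (an inference from the
// assertions, not a spec): with "p.exclude" = "foo", a record filter turns
// the whole "foo" record into a no-op, while a metric filter drops only
// metrics named "foo" -- the "foo" tag and the "c0" counter both survive:
//
//   rb.addGauge(info("foo", ""), 1);  // silently dropped by the metric filter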
}
| 2,366 | 40.526316 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.impl;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.SocketException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
import org.apache.hadoop.metrics2.lib.MutableRate;
import org.apache.hadoop.metrics2.sink.ganglia.AbstractGangliaSink;
import org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30;
import org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31;
import org.apache.hadoop.metrics2.sink.ganglia.GangliaMetricsTestHelper;
import org.junit.Test;
public class TestGangliaMetrics {
public static final Log LOG = LogFactory.getLog(TestGangliaMetrics.class);
private final String[] expectedMetrics =
{ "test.s1rec.C1",
"test.s1rec.G1",
"test.s1rec.Xxx",
"test.s1rec.Yyy",
"test.s1rec.S1NumOps",
"test.s1rec.S1AvgTime" };
@Test
public void testTagsForPrefix() throws Exception {
ConfigBuilder cb = new ConfigBuilder()
.add("test.sink.ganglia.tagsForPrefix.all", "*")
.add("test.sink.ganglia.tagsForPrefix.some", "NumActiveSinks, " +
"NumActiveSources")
.add("test.sink.ganglia.tagsForPrefix.none", "");
GangliaSink30 sink = new GangliaSink30();
sink.init(cb.subset("test.sink.ganglia"));
List<MetricsTag> tags = new ArrayList<MetricsTag>();
tags.add(new MetricsTag(MsInfo.Context, "all"));
tags.add(new MetricsTag(MsInfo.NumActiveSources, "foo"));
tags.add(new MetricsTag(MsInfo.NumActiveSinks, "bar"));
tags.add(new MetricsTag(MsInfo.NumAllSinks, "haa"));
tags.add(new MetricsTag(MsInfo.Hostname, "host"));
Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long) 1, tags, metrics);
StringBuilder sb = new StringBuilder();
sink.appendPrefix(record, sb);
assertEquals(".NumActiveSources=foo.NumActiveSinks=bar.NumAllSinks=haa", sb.toString());
tags.set(0, new MetricsTag(MsInfo.Context, "some"));
sb = new StringBuilder();
sink.appendPrefix(record, sb);
assertEquals(".NumActiveSources=foo.NumActiveSinks=bar", sb.toString());
tags.set(0, new MetricsTag(MsInfo.Context, "none"));
sb = new StringBuilder();
sink.appendPrefix(record, sb);
assertEquals("", sb.toString());
tags.set(0, new MetricsTag(MsInfo.Context, "nada"));
sb = new StringBuilder();
sink.appendPrefix(record, sb);
assertEquals("", sb.toString());
}
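// Prefix rules exercised above, summarized (inferred from the expected
// strings, so treat this as an inference): "*" appends every tag except
// Context and Hostname, an explicit list appends only the named tags, and
// "" or an unconfigured context appends nothing:
//
//   all  ("*")                                -> ".NumActiveSources=foo.NumActiveSinks=bar.NumAllSinks=haa"
//   some ("NumActiveSinks, NumActiveSources") -> ".NumActiveSources=foo.NumActiveSinks=bar"
//   none ("") and nada (unconfigured)         -> ""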
@Test public void testGangliaMetrics2() throws Exception {
ConfigBuilder cb = new ConfigBuilder().add("default.period", 10)
.add("test.sink.gsink30.context", "test") // filter out only "test"
.add("test.sink.gsink31.context", "test") // filter out only "test"
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start();
TestSource s1 = ms.register("s1", "s1 desc", new TestSource("s1rec"));
s1.c1.incr();
s1.xxx.incr();
s1.g1.set(2);
s1.yyy.incr(2);
s1.s1.add(0);
final int expectedCountFromGanglia30 = expectedMetrics.length;
final int expectedCountFromGanglia31 = 2 * expectedMetrics.length;
// Setup test for GangliaSink30
AbstractGangliaSink gsink30 = new GangliaSink30();
gsink30.init(cb.subset("test"));
MockDatagramSocket mockds30 = new MockDatagramSocket();
GangliaMetricsTestHelper.setDatagramSocket(gsink30, mockds30);
// Setup test for GangliaSink31
AbstractGangliaSink gsink31 = new GangliaSink31();
gsink31.init(cb.subset("test"));
MockDatagramSocket mockds31 = new MockDatagramSocket();
GangliaMetricsTestHelper.setDatagramSocket(gsink31, mockds31);
// register the sinks
ms.register("gsink30", "gsink30 desc", gsink30);
ms.register("gsink31", "gsink31 desc", gsink31);
ms.publishMetricsNow(); // publish the metrics
ms.stop();
// check GangliaSink30 data
checkMetrics(mockds30.getCapturedSend(), expectedCountFromGanglia30);
// check GangliaSink31 data
checkMetrics(mockds31.getCapturedSend(), expectedCountFromGanglia31);
}
// check the expected against the actual metrics
private void checkMetrics(List<byte[]> bytearrlist, int expectedCount) {
boolean[] foundMetrics = new boolean[expectedMetrics.length];
for (byte[] bytes : bytearrlist) {
String binaryStr = new String(bytes);
for (int index = 0; index < expectedMetrics.length; index++) {
if (binaryStr.indexOf(expectedMetrics[index]) >= 0) {
foundMetrics[index] = true;
break;
}
}
}
for (int index = 0; index < foundMetrics.length; index++) {
  if (!foundMetrics[index]) {
    fail("Missing metrics: " + expectedMetrics[index]);
  }
}
assertEquals("Mismatch in record count: ",
expectedCount, bytearrlist.size());
}
@SuppressWarnings("unused")
@Metrics(context="test")
private static class TestSource {
@Metric("C1 desc") MutableCounterLong c1;
@Metric("XXX desc") MutableCounterLong xxx;
@Metric("G1 desc") MutableGaugeLong g1;
@Metric("YYY desc") MutableGaugeLong yyy;
@Metric MutableRate s1;
final MetricsRegistry registry;
TestSource(String recName) {
registry = new MetricsRegistry(recName);
}
}
/**
 * This class is used to capture data sent to Ganglia servers.
 *
 * The initial attempt was to mock and capture with mockito, but testing
 * showed that mockito keeps a reference to the byte array; since the sink
 * code reuses that array, all the captured byte arrays ended up pointing
 * to the same instance.
 */
private class MockDatagramSocket extends DatagramSocket {
private ArrayList<byte[]> capture;
/**
* @throws SocketException
*/
public MockDatagramSocket() throws SocketException {
capture = new ArrayList<byte[]>();
}
/* (non-Javadoc)
* @see java.net.DatagramSocket#send(java.net.DatagramPacket)
*/
@Override
public void send(DatagramPacket p) throws IOException {
// capture the byte arrays
byte[] bytes = new byte[p.getLength()];
System.arraycopy(p.getData(), p.getOffset(), bytes, 0, p.getLength());
capture.add(bytes);
}
/**
* @return the captured byte arrays
*/
ArrayList<byte[]> getCapturedSend() {
return capture;
}
}
}
| 7,907 | 35.109589 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestSinkQueue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.impl;
import java.util.ConcurrentModificationException;
import java.util.concurrent.CountDownLatch;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import static org.apache.hadoop.metrics2.impl.SinkQueue.*;
/**
* Test the half-blocking metrics sink queue
*/
public class TestSinkQueue {
private static final Log LOG = LogFactory.getLog(TestSinkQueue.class);
/**
* Test common use case
* @throws Exception
*/
@Test public void testCommon() throws Exception {
final SinkQueue<Integer> q = new SinkQueue<Integer>(2);
q.enqueue(1);
assertEquals("queue front", 1, (int) q.front());
assertEquals("queue back", 1, (int) q.back());
assertEquals("element", 1, (int) q.dequeue());
assertTrue("should enqueue", q.enqueue(2));
q.consume(new Consumer<Integer>() {
@Override public void consume(Integer e) {
assertEquals("element", 2, (int) e);
}
});
assertTrue("should enqueue", q.enqueue(3));
assertEquals("element", 3, (int) q.dequeue());
assertEquals("queue size", 0, q.size());
assertEquals("queue front", null, q.front());
assertEquals("queue back", null, q.back());
}
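// Semantics sketch distilled from the test above (an inference, not from the
// SinkQueue javadoc): enqueue() never blocks and returns false when the
// queue is full, while dequeue()/consume() block when it is empty -- hence
// "half-blocking":
//
//   SinkQueue<Integer> q = new SinkQueue<Integer>(1);
//   q.enqueue(1);            // true
//   q.enqueue(2);            // false: queue full, element dropped
//   int head = q.dequeue();  // 1; would block if the queue were empty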
/**
* Test blocking when queue is empty
* @throws Exception
*/
@Test public void testEmptyBlocking() throws Exception {
testEmptyBlocking(0);
testEmptyBlocking(100);
}
private void testEmptyBlocking(int awhile) throws Exception {
final SinkQueue<Integer> q = new SinkQueue<Integer>(2);
final Runnable trigger = mock(Runnable.class);
// try consuming an empty queue and blocking
Thread t = new Thread() {
@Override public void run() {
try {
assertEquals("element", 1, (int) q.dequeue());
q.consume(new Consumer<Integer>() {
@Override public void consume(Integer e) {
assertEquals("element", 2, (int) e);
trigger.run();
}
});
}
catch (InterruptedException e) {
LOG.warn("Interrupted", e);
}
}
};
t.start();
// Should work with or without sleep
if (awhile > 0) {
Thread.sleep(awhile);
}
q.enqueue(1);
q.enqueue(2);
t.join();
verify(trigger).run();
}
/**
* Test nonblocking enqueue when queue is full
* @throws Exception
*/
@Test public void testFull() throws Exception {
final SinkQueue<Integer> q = new SinkQueue<Integer>(1);
q.enqueue(1);
assertTrue("should drop", !q.enqueue(2));
assertEquals("element", 1, (int) q.dequeue());
q.enqueue(3);
q.consume(new Consumer<Integer>() {
@Override public void consume(Integer e) {
assertEquals("element", 3, (int) e);
}
});
assertEquals("queue size", 0, q.size());
}
/**
* Test the consumeAll method
* @throws Exception
*/
@Test public void testConsumeAll() throws Exception {
final int capacity = 64; // arbitrary
final SinkQueue<Integer> q = new SinkQueue<Integer>(capacity);
for (int i = 0; i < capacity; ++i) {
assertTrue("should enqueue", q.enqueue(i));
}
assertTrue("should not enqueue", !q.enqueue(capacity));
final Runnable trigger = mock(Runnable.class);
q.consumeAll(new Consumer<Integer>() {
private int expected = 0;
@Override public void consume(Integer e) {
assertEquals("element", expected++, (int) e);
trigger.run();
}
});
verify(trigger, times(capacity)).run();
}
/**
* Test the consumer throwing exceptions
* @throws Exception
*/
@Test public void testConsumerException() throws Exception {
final SinkQueue<Integer> q = new SinkQueue<Integer>(1);
final RuntimeException ex = new RuntimeException("expected");
q.enqueue(1);
try {
q.consume(new Consumer<Integer>() {
@Override public void consume(Integer e) {
throw ex;
}
});
}
catch (Exception expected) {
assertSame("consumer exception", ex, expected);
}
// The queue should be in consistent state after exception
assertEquals("queue size", 1, q.size());
assertEquals("element", 1, (int) q.dequeue());
}
/**
* Test the clear method
*/
@Test public void testClear() {
final SinkQueue<Integer> q = new SinkQueue<Integer>(128);
for (int i = 0; i < q.capacity() + 97; ++i) {
q.enqueue(i);
}
assertEquals("queue size", q.capacity(), q.size());
q.clear();
assertEquals("queue size", 0, q.size());
}
/**
* Test consumers that take their time.
* @throws Exception
*/
@Test public void testHangingConsumer() throws Exception {
SinkQueue<Integer> q = newSleepingConsumerQueue(2, 1, 2);
assertEquals("queue back", 2, (int) q.back());
assertTrue("should drop", !q.enqueue(3)); // should not block
assertEquals("queue size", 2, q.size());
assertEquals("queue head", 1, (int) q.front());
assertEquals("queue back", 2, (int) q.back());
}
/**
* Test concurrent consumer access, which is illegal
* @throws Exception
*/
@Test public void testConcurrentConsumers() throws Exception {
final SinkQueue<Integer> q = newSleepingConsumerQueue(2, 1);
assertTrue("should enqueue", q.enqueue(2));
assertEquals("queue back", 2, (int) q.back());
assertTrue("should drop", !q.enqueue(3)); // should not block
shouldThrowCME(new Fun() {
@Override public void run() {
q.clear();
}
});
shouldThrowCME(new Fun() {
@Override public void run() throws Exception {
q.consume(null);
}
});
shouldThrowCME(new Fun() {
@Override public void run() throws Exception {
q.consumeAll(null);
}
});
shouldThrowCME(new Fun() {
@Override public void run() throws Exception {
q.dequeue();
}
});
// The queue should still be in consistent state after all the exceptions
assertEquals("queue size", 2, q.size());
assertEquals("queue front", 1, (int) q.front());
assertEquals("queue back", 2, (int) q.back());
}
private void shouldThrowCME(Fun callback) throws Exception {
try {
callback.run();
}
catch (ConcurrentModificationException e) {
LOG.info(e);
return;
}
LOG.error("should've thrown CME");
fail("should've thrown CME");
}
private SinkQueue<Integer> newSleepingConsumerQueue(int capacity,
int... values) throws Exception {
final SinkQueue<Integer> q = new SinkQueue<Integer>(capacity);
for (int i : values) {
q.enqueue(i);
}
final CountDownLatch barrier = new CountDownLatch(1);
Thread t = new Thread() {
@Override public void run() {
try {
Thread.sleep(10); // causes failure without barrier
q.consume(new Consumer<Integer>() {
@Override
public void consume(Integer e) throws InterruptedException {
LOG.info("sleeping");
barrier.countDown();
Thread.sleep(1000 * 86400); // a long time
}
});
}
catch (InterruptedException ex) {
LOG.warn("Interrupted", ex);
}
}
};
t.setName("Sleeping consumer");
t.setDaemon(true); // so jvm can exit
t.start();
barrier.await();
LOG.debug("Returning new sleeping consumer queue");
return q;
}
static interface Fun {
void run() throws Exception;
}
}
| 8,388 | 28.854093 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsVisitor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.impl;
import java.util.List;
import org.junit.Test;
import static org.junit.Assert.*;
import org.junit.runner.RunWith;
import static org.mockito.Mockito.*;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.runners.MockitoJUnitRunner;
import org.apache.hadoop.metrics2.MetricsVisitor;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsInfo;
import static org.apache.hadoop.metrics2.lib.Interns.*;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
/**
* Test the metric visitor interface
*/
@RunWith(MockitoJUnitRunner.class)
public class TestMetricsVisitor {
@Captor private ArgumentCaptor<MetricsInfo> c1;
@Captor private ArgumentCaptor<MetricsInfo> c2;
@Captor private ArgumentCaptor<MetricsInfo> g1;
@Captor private ArgumentCaptor<MetricsInfo> g2;
@Captor private ArgumentCaptor<MetricsInfo> g3;
@Captor private ArgumentCaptor<MetricsInfo> g4;
/**
* Test the common use cases
*/
@Test public void testCommon() {
MetricsVisitor visitor = mock(MetricsVisitor.class);
MetricsRegistry registry = new MetricsRegistry("test");
List<AbstractMetric> metrics = MetricsLists.builder("test")
.addCounter(info("c1", "int counter"), 1)
.addCounter(info("c2", "long counter"), 2L)
.addGauge(info("g1", "int gauge"), 5)
.addGauge(info("g2", "long gauge"), 6L)
.addGauge(info("g3", "float gauge"), 7f)
.addGauge(info("g4", "double gauge"), 8d)
.metrics();
for (AbstractMetric metric : metrics) {
metric.visit(visitor);
}
verify(visitor).counter(c1.capture(), eq(1));
assertEquals("c1 name", "c1", c1.getValue().name());
assertEquals("c1 description", "int counter", c1.getValue().description());
verify(visitor).counter(c2.capture(), eq(2L));
assertEquals("c2 name", "c2", c2.getValue().name());
assertEquals("c2 description", "long counter", c2.getValue().description());
verify(visitor).gauge(g1.capture(), eq(5));
assertEquals("g1 name", "g1", g1.getValue().name());
assertEquals("g1 description", "int gauge", g1.getValue().description());
verify(visitor).gauge(g2.capture(), eq(6L));
assertEquals("g2 name", "g2", g2.getValue().name());
assertEquals("g2 description", "long gauge", g2.getValue().description());
verify(visitor).gauge(g3.capture(), eq(7f));
assertEquals("g3 name", "g3", g3.getValue().name());
assertEquals("g3 description", "float gauge", g3.getValue().description());
verify(visitor).gauge(g4.capture(), eq(8d));
assertEquals("g4 name", "g4", g4.getValue().name());
assertEquals("g4 description", "double gauge", g4.getValue().description());
}
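// A concrete visitor sketch matching the calls verified above (the overload
// set is inferred from the verify() calls, so treat it as an assumption):
//
//   MetricsVisitor printer = new MetricsVisitor() {
//     @Override public void counter(MetricsInfo info, int value) { System.out.println(info.name() + "=" + value); }
//     @Override public void counter(MetricsInfo info, long value) { System.out.println(info.name() + "=" + value); }
//     @Override public void gauge(MetricsInfo info, int value) { System.out.println(info.name() + "=" + value); }
//     @Override public void gauge(MetricsInfo info, long value) { System.out.println(info.name() + "=" + value); }
//     @Override public void gauge(MetricsInfo info, float value) { System.out.println(info.name() + "=" + value); }
//     @Override public void gauge(MetricsInfo info, double value) { System.out.println(info.name() + "=" + value); }
//   };
//   for (AbstractMetric metric : metrics) metric.visit(printer);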
}
| 3,565 | 38.622222 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGraphiteMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.impl;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.sink.GraphiteSink;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.internal.util.reflection.Whitebox;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.Collections;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.reset;
public class TestGraphiteMetrics {
private AbstractMetric makeMetric(String name, Number value) {
AbstractMetric metric = mock(AbstractMetric.class);
when(metric.name()).thenReturn(name);
when(metric.value()).thenReturn(value);
return metric;
}
private GraphiteSink.Graphite makeGraphite() {
GraphiteSink.Graphite mockGraphite = mock(GraphiteSink.Graphite.class);
when(mockGraphite.isConnected()).thenReturn(true);
return mockGraphite;
}
@Test
public void testPutMetrics() {
GraphiteSink sink = new GraphiteSink();
List<MetricsTag> tags = new ArrayList<MetricsTag>();
tags.add(new MetricsTag(MsInfo.Context, "all"));
tags.add(new MetricsTag(MsInfo.Hostname, "host"));
Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
metrics.add(makeMetric("foo1", 1.25));
metrics.add(makeMetric("foo2", 2.25));
MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long) 10000, tags, metrics);
ArgumentCaptor<String> argument = ArgumentCaptor.forClass(String.class);
final GraphiteSink.Graphite mockGraphite = makeGraphite();
Whitebox.setInternalState(sink, "graphite", mockGraphite);
sink.putMetrics(record);
try {
verify(mockGraphite).write(argument.capture());
} catch (IOException e) {
e.printStackTrace();
}
String result = argument.getValue();
assertTrue(
    result.equals("null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n" +
    "null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n") ||
    result.equals("null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n" +
    "null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n"));
}
@Test
public void testPutMetrics2() {
GraphiteSink sink = new GraphiteSink();
List<MetricsTag> tags = new ArrayList<MetricsTag>();
tags.add(new MetricsTag(MsInfo.Context, "all"));
tags.add(new MetricsTag(MsInfo.Hostname, null));
Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
metrics.add(makeMetric("foo1", 1));
metrics.add(makeMetric("foo2", 2));
MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long) 10000, tags, metrics);
ArgumentCaptor<String> argument = ArgumentCaptor.forClass(String.class);
final GraphiteSink.Graphite mockGraphite = makeGraphite();
Whitebox.setInternalState(sink, "graphite", mockGraphite);
sink.putMetrics(record);
try {
verify(mockGraphite).write(argument.capture());
} catch (IOException e) {
e.printStackTrace();
}
String result = argument.getValue();
assertTrue(
    result.equals("null.all.Context.Context=all.foo1 1 10\n" +
    "null.all.Context.Context=all.foo2 2 10\n") ||
    result.equals("null.all.Context.Context=all.foo2 2 10\n" +
    "null.all.Context.Context=all.foo1 1 10\n"));
}
/**
* Assert that timestamps are converted correctly, ticket HADOOP-11182
*/
@Test
public void testPutMetrics3() {
// setup GraphiteSink
GraphiteSink sink = new GraphiteSink();
final GraphiteSink.Graphite mockGraphite = makeGraphite();
Whitebox.setInternalState(sink, "graphite", mockGraphite);
// given two metrics records with timestamps 1000 milliseconds apart.
List<MetricsTag> tags = Collections.emptyList();
Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
metrics.add(makeMetric("foo1", 1));
MetricsRecord record1 = new MetricsRecordImpl(MsInfo.Context, 1000000000000L, tags, metrics);
MetricsRecord record2 = new MetricsRecordImpl(MsInfo.Context, 1000000001000L, tags, metrics);
sink.putMetrics(record1);
sink.putMetrics(record2);
sink.flush();
try {
sink.close();
} catch(IOException e) {
e.printStackTrace();
}
// then the timestamps in the graphite stream should differ by one second.
try {
verify(mockGraphite).write(eq("null.default.Context.foo1 1 1000000000\n"));
verify(mockGraphite).write(eq("null.default.Context.foo1 1 1000000001\n"));
} catch (IOException e) {
e.printStackTrace();
}
}
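// Worked conversion behind the assertions above (derived from the expected
// strings): Graphite consumes epoch seconds, so the millisecond record
// timestamps are divided by 1000:
//
//   1000000000000L ms / 1000 = 1000000000 s
//   1000000001000L ms / 1000 = 1000000001 s  (one second apart)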
@Test
public void testFailureAndPutMetrics() throws IOException {
GraphiteSink sink = new GraphiteSink();
List<MetricsTag> tags = new ArrayList<MetricsTag>();
tags.add(new MetricsTag(MsInfo.Context, "all"));
tags.add(new MetricsTag(MsInfo.Hostname, "host"));
Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
metrics.add(makeMetric("foo1", 1.25));
metrics.add(makeMetric("foo2", 2.25));
MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long) 10000, tags, metrics);
final GraphiteSink.Graphite mockGraphite = makeGraphite();
Whitebox.setInternalState(sink, "graphite", mockGraphite);
// throw exception when first try
doThrow(new IOException("IO exception")).when(mockGraphite).write(anyString());
sink.putMetrics(record);
verify(mockGraphite).write(anyString());
verify(mockGraphite).close();
// reset mock and try again
reset(mockGraphite);
when(mockGraphite.isConnected()).thenReturn(false);
ArgumentCaptor<String> argument = ArgumentCaptor.forClass(String.class);
sink.putMetrics(record);
verify(mockGraphite).write(argument.capture());
String result = argument.getValue();
assertTrue(
    result.equals("null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n" +
    "null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n") ||
    result.equals("null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n" +
    "null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n"));
}
@Test
public void testClose() {
GraphiteSink sink = new GraphiteSink();
final GraphiteSink.Graphite mockGraphite = makeGraphite();
Whitebox.setInternalState(sink, "graphite", mockGraphite);
try {
sink.close();
} catch (IOException ioe) {
ioe.printStackTrace();
}
try {
verify(mockGraphite).close();
} catch (IOException ioe) {
ioe.printStackTrace();
}
}
}
| 8,164 | 36.800926 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.impl;
import static org.junit.Assert.*;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.MetricsAnnotations;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MetricsSourceBuilder;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import static org.apache.hadoop.metrics2.lib.Interns.info;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanInfo;
public class TestMetricsSourceAdapter {
@Test
public void testPurgeOldMetrics() throws Exception {
// create a test source that emits a single gauge (value 1) under a fresh key
PurgableSource source = new PurgableSource();
MetricsSourceBuilder sb = MetricsAnnotations.newSourceBuilder(source);
final MetricsSource s = sb.build();
List<MetricsTag> injectedTags = new ArrayList<MetricsTag>();
MetricsSourceAdapter sa = new MetricsSourceAdapter(
"tst", "tst", "testdesc", s, injectedTags, null, null, 1, false);
MBeanInfo info = sa.getMBeanInfo();
boolean sawIt = false;
for (MBeanAttributeInfo mBeanAttributeInfo : info.getAttributes()) {
sawIt |= mBeanAttributeInfo.getName().equals(source.lastKeyName);
}
assertTrue("The last generated metric is not exported to jmx", sawIt);
Thread.sleep(1000); // skip JMX cache TTL
info = sa.getMBeanInfo();
sawIt = false;
for (MBeanAttributeInfo mBeanAttributeInfo : info.getAttributes()) {
sawIt |= mBeanAttributeInfo.getName().equals(source.lastKeyName);
}
assertTrue("The last generated metric is not exported to jmx", sawIt);
}
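// Note on the TTL at play above (the meaning of the "1" constructor argument
// is inferred, so treat this as an assumption): the adapter is built with a
// 1 ms JMX cache TTL, so the Thread.sleep(1000) forces the MBeanInfo to be
// rebuilt and the second assertion checks that the freshly generated key is
// exported rather than purged with the old ones.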
// generates a new key for each call
class PurgableSource implements MetricsSource {
int nextKey = 0;
String lastKeyName = null;
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
MetricsRecordBuilder rb =
collector.addRecord("purgablesource")
.setContext("test");
lastKeyName = "key" + nextKey++;
rb.addGauge(info(lastKeyName, "desc"), 1);
}
}
@Test
public void testGetMetricsAndJmx() throws Exception {
// create test source with a single metric counter of value 0
TestSource source = new TestSource("test");
MetricsSourceBuilder sb = MetricsAnnotations.newSourceBuilder(source);
final MetricsSource s = sb.build();
List<MetricsTag> injectedTags = new ArrayList<MetricsTag>();
MetricsSourceAdapter sa = new MetricsSourceAdapter(
"test", "test", "test desc", s, injectedTags, null, null, 1, false);
// all metrics are initially assumed to have changed
MetricsCollectorImpl builder = new MetricsCollectorImpl();
Iterable<MetricsRecordImpl> metricsRecords = sa.getMetrics(builder, true);
// Validate getMetrics and JMX initial values
MetricsRecordImpl metricsRecord = metricsRecords.iterator().next();
assertEquals(0L,
metricsRecord.metrics().iterator().next().value().longValue());
Thread.sleep(100); // skip JMX cache TTL
assertEquals(0L, (Number)sa.getAttribute("C1"));
// change metric value
source.incrementCnt();
// validate getMetrics and JMX
builder = new MetricsCollectorImpl();
metricsRecords = sa.getMetrics(builder, true);
metricsRecord = metricsRecords.iterator().next();
assertTrue(metricsRecord.metrics().iterator().hasNext());
Thread.sleep(100); // skip JMX cache TTL
assertEquals(1L, (Number)sa.getAttribute("C1"));
}
@SuppressWarnings("unused")
@Metrics(context="test")
private static class TestSource {
@Metric("C1 desc") MutableCounterLong c1;
final MetricsRegistry registry;
TestSource(String recName) {
registry = new MetricsRegistry(recName);
}
public void incrementCnt() {
c1.incr();
}
}
}
| 5,015 | 35.086331 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/ConfigUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.impl;
import java.io.PrintStream;
import java.util.Iterator;
import static org.junit.Assert.*;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.PropertiesConfiguration;
/**
* Helpers for config tests and debugging
*/
class ConfigUtil {
static void dump(Configuration c) {
dump(null, c, System.out);
}
static void dump(String header, Configuration c) {
dump(header, c, System.out);
}
static void dump(String header, Configuration c, PrintStream out) {
PropertiesConfiguration p = new PropertiesConfiguration();
p.copy(c);
if (header != null) {
out.println(header);
}
try { p.save(out); }
catch (Exception e) {
throw new RuntimeException("Error saving config", e);
}
}
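// Usage sketch (illustrative only):
//
//   ConfigUtil.dump("effective metrics config:", conf);  // print as properties
//   ConfigUtil.assertEq(expected, actual);  // keys and values must match both ways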
static void assertEq(Configuration expected, Configuration actual) {
// Check that the actual config contains all the properties of the expected
for (Iterator<?> it = expected.getKeys(); it.hasNext();) {
String key = (String) it.next();
assertTrue("actual should contain "+ key, actual.containsKey(key));
assertEquals("value of "+ key, expected.getProperty(key),
actual.getProperty(key));
}
// Check that the actual config has no extra properties
for (Iterator<?> it = actual.getKeys(); it.hasNext();) {
String key = (String) it.next();
assertTrue("expected should contain "+ key, expected.containsKey(key));
}
}
}
| 2,335 | 33.352941 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/MetricsLists.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.impl;
/**
* Helper to create metrics list for testing
*/
class MetricsLists {
static MetricsRecordBuilderImpl builder(String name) {
return new MetricsCollectorImpl().addRecord(name);
}
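// Usage sketch, mirroring how TestMetricsVisitor in this package uses it:
//
//   List<AbstractMetric> metrics = MetricsLists.builder("test")
//       .addCounter(info("c1", "int counter"), 1)
//       .addGauge(info("g1", "int gauge"), 5)
//       .metrics();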
}
| 1,042 | 33.766667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/ConfigBuilder.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.impl;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.commons.configuration.SubsetConfiguration;
/**
* Helper class for building configs, mostly used in tests
*/
public class ConfigBuilder {
/** The built config */
public final PropertiesConfiguration config;
/**
* Default constructor
*/
public ConfigBuilder() {
config = new PropertiesConfiguration();
}
/**
* Add a property to the config
* @param key of the property
* @param value of the property
* @return self
*/
public ConfigBuilder add(String key, Object value) {
config.addProperty(key, value);
return this;
}
/**
* Save the config to a file
* @param filename to save
* @return self
* @throws RuntimeException
*/
public ConfigBuilder save(String filename) {
try {
config.save(filename);
}
catch (Exception e) {
throw new RuntimeException("Error saving config", e);
}
return this;
}
/**
* Return a subset configuration (so getParent() can be used).
* @param prefix of the subset
* @return the subset config
*/
public SubsetConfiguration subset(String prefix) {
return new SubsetConfiguration(config, prefix, ".");
}
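// Usage sketch, mirroring the tests in this package:
//
//   SubsetConfiguration conf = new ConfigBuilder()
//       .add("test.sink.ganglia.tagsForPrefix.all", "*")
//       .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"))
//       .subset("test.sink.ganglia");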
}
| 2,089 | 26.866667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.impl;
import java.util.Map;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import static org.apache.hadoop.metrics2.impl.ConfigUtil.*;
/**
* Test metrics configuration
*/
public class TestMetricsConfig {
static final Log LOG = LogFactory.getLog(TestMetricsConfig.class);
/**
* Common use cases
* @throws Exception
*/
@Test public void testCommon() throws Exception {
String filename = getTestFilename("test-metrics2");
new ConfigBuilder()
.add("*.foo", "default foo")
.add("p1.*.bar", "p1 default bar")
.add("p1.t1.*.bar", "p1.t1 default bar")
.add("p1.t1.i1.name", "p1.t1.i1.name")
.add("p1.t1.42.bar", "p1.t1.42.bar")
.add("p1.t2.i1.foo", "p1.t2.i1.foo")
.add("p2.*.foo", "p2 default foo")
.save(filename);
MetricsConfig mc = MetricsConfig.create("p1", filename);
LOG.debug("mc:"+ mc);
Configuration expected = new ConfigBuilder()
.add("*.bar", "p1 default bar")
.add("t1.*.bar", "p1.t1 default bar")
.add("t1.i1.name", "p1.t1.i1.name")
.add("t1.42.bar", "p1.t1.42.bar")
.add("t2.i1.foo", "p1.t2.i1.foo")
.config;
assertEq(expected, mc);
testInstances(mc);
}
private void testInstances(MetricsConfig c) throws Exception {
Map<String, MetricsConfig> map = c.getInstanceConfigs("t1");
Map<String, MetricsConfig> map2 = c.getInstanceConfigs("t2");
assertEquals("number of t1 instances", 2, map.size());
assertEquals("number of t2 instances", 1, map2.size());
assertTrue("contains t1 instance i1", map.containsKey("i1"));
assertTrue("contains t1 instance 42", map.containsKey("42"));
assertTrue("contains t2 instance i1", map2.containsKey("i1"));
MetricsConfig t1i1 = map.get("i1");
MetricsConfig t1i42 = map.get("42");
MetricsConfig t2i1 = map2.get("i1");
LOG.debug("--- t1 instance i1:"+ t1i1);
LOG.debug("--- t1 instance 42:"+ t1i42);
LOG.debug("--- t2 instance i1:"+ t2i1);
Configuration t1expected1 = new ConfigBuilder()
.add("name", "p1.t1.i1.name").config;
Configuration t1expected42 = new ConfigBuilder()
.add("bar", "p1.t1.42.bar").config;
Configuration t2expected1 = new ConfigBuilder()
.add("foo", "p1.t2.i1.foo").config;
assertEq(t1expected1, t1i1);
assertEq(t1expected42, t1i42);
assertEq(t2expected1, t2i1);
LOG.debug("asserting foo == default foo");
// Check default lookups
assertEquals("value of foo in t1 instance i1", "default foo",
t1i1.getString("foo"));
assertEquals("value of bar in t1 instance i1", "p1.t1 default bar",
t1i1.getString("bar"));
assertEquals("value of foo in t1 instance 42", "default foo",
t1i42.getString("foo"));
assertEquals("value of foo in t2 instance i1", "p1.t2.i1.foo",
t2i1.getString("foo"));
assertEquals("value of bar in t2 instance i1", "p1 default bar",
t2i1.getString("bar"));
}
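// Lookup fallback distilled from the assertions above (an inference): a key
// asked of instance i1 under t1 of prefix p1 resolves most-specific first,
// e.g. for "foo":
//
//   p1.t1.i1.foo -> p1.t1.*.foo -> p1.*.foo -> *.foo   ("default foo" wins)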
/**
* Should not throw if missing config files
*/
@Test public void testMissingFiles() {
MetricsConfig config = MetricsConfig.create("JobTracker", "non-existent.properties");
assertTrue(config.isEmpty());
}
/**
* Test the config file load order
* @throws Exception
*/
@Test public void testLoadFirst() throws Exception {
String filename = getTestFilename("hadoop-metrics2-p1");
new ConfigBuilder().add("p1.foo", "p1foo").save(filename);
MetricsConfig mc = MetricsConfig.create("p1");
MetricsConfig mc2 = MetricsConfig.create("p1", "na1", "na2", filename);
Configuration expected = new ConfigBuilder().add("foo", "p1foo").config;
assertEq(expected, mc);
assertEq(expected, mc2);
}
/**
* Return a test filename in the class path
* @param basename
* @return the filename
*/
public static String getTestFilename(String basename) {
return System.getProperty("test.build.classes", "target/test-classes") +
"/"+ basename +".properties";
}
}
| 5,055 | 33.868966 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.impl;
import java.io.Closeable;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.*;
import javax.annotation.Nullable;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.runners.MockitoJUnitRunner;
import org.mockito.stubbing.Answer;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsException;
import static org.apache.hadoop.test.MoreAsserts.*;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.annotation.*;
import static org.apache.hadoop.metrics2.lib.Interns.*;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableRate;
import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
/**
* Test the MetricsSystemImpl class
*/
@RunWith(MockitoJUnitRunner.class)
public class TestMetricsSystemImpl {
private static final Log LOG = LogFactory.getLog(TestMetricsSystemImpl.class);
static { DefaultMetricsSystem.setMiniClusterMode(true); }
@Captor private ArgumentCaptor<MetricsRecord> r1;
@Captor private ArgumentCaptor<MetricsRecord> r2;
private static String hostname = MetricsSystemImpl.getHostname();
public static class TestSink implements MetricsSink {
@Override public void putMetrics(MetricsRecord record) {
LOG.debug(record);
}
@Override public void flush() {}
@Override public void init(SubsetConfiguration conf) {
LOG.debug(MetricsConfig.toString(conf));
}
}
@Test public void testInitFirstVerifyStopInvokedImmediately() throws Exception {
DefaultMetricsSystem.shutdown();
new ConfigBuilder().add("*.period", 8)
//.add("test.sink.plugin.urls", getPluginUrlsAsString())
.add("test.sink.test.class", TestSink.class.getName())
.add("test.*.source.filter.exclude", "s0")
.add("test.source.s1.metric.filter.exclude", "X*")
.add("test.sink.sink1.metric.filter.exclude", "Y*")
.add("test.sink.sink2.metric.filter.exclude", "Y*")
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start();
ms.register("s0", "s0 desc", new TestSource("s0rec"));
TestSource s1 = ms.register("s1", "s1 desc", new TestSource("s1rec"));
s1.c1.incr();
s1.xxx.incr();
s1.g1.set(2);
s1.yyy.incr(2);
s1.s1.add(0);
MetricsSink sink1 = mock(MetricsSink.class);
MetricsSink sink2 = mock(MetricsSink.class);
ms.registerSink("sink1", "sink1 desc", sink1);
ms.registerSink("sink2", "sink2 desc", sink2);
ms.publishMetricsNow(); // publish the metrics
ms.stop();
ms.shutdown();
//When we call stop, at most two sources will be consumed by each sink thread.
verify(sink1, atMost(2)).putMetrics(r1.capture());
List<MetricsRecord> mr1 = r1.getAllValues();
verify(sink2, atMost(2)).putMetrics(r2.capture());
List<MetricsRecord> mr2 = r2.getAllValues();
if (mr1.size() != 0 && mr2.size() != 0) {
checkMetricsRecords(mr1);
assertEquals("output", mr1, mr2);
} else if (mr1.size() != 0) {
checkMetricsRecords(mr1);
} else if (mr2.size() != 0) {
checkMetricsRecords(mr2);
}
}
@Test public void testInitFirstVerifyCallBacks() throws Exception {
DefaultMetricsSystem.shutdown();
new ConfigBuilder().add("*.period", 8)
//.add("test.sink.plugin.urls", getPluginUrlsAsString())
.add("test.sink.test.class", TestSink.class.getName())
.add("test.*.source.filter.exclude", "s0")
.add("test.source.s1.metric.filter.exclude", "X*")
.add("test.sink.sink1.metric.filter.exclude", "Y*")
.add("test.sink.sink2.metric.filter.exclude", "Y*")
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start();
ms.register("s0", "s0 desc", new TestSource("s0rec"));
TestSource s1 = ms.register("s1", "s1 desc", new TestSource("s1rec"));
s1.c1.incr();
s1.xxx.incr();
s1.g1.set(2);
s1.yyy.incr(2);
s1.s1.add(0);
MetricsSink sink1 = mock(MetricsSink.class);
MetricsSink sink2 = mock(MetricsSink.class);
ms.registerSink("sink1", "sink1 desc", sink1);
ms.registerSink("sink2", "sink2 desc", sink2);
ms.publishMetricsNow(); // publish the metrics
try {
verify(sink1, timeout(200).times(2)).putMetrics(r1.capture());
verify(sink2, timeout(200).times(2)).putMetrics(r2.capture());
} finally {
ms.stop();
ms.shutdown();
}
//When we call stop, at most two sources will be consumed by each sink thread.
List<MetricsRecord> mr1 = r1.getAllValues();
List<MetricsRecord> mr2 = r2.getAllValues();
checkMetricsRecords(mr1);
assertEquals("output", mr1, mr2);
}
@Test public void testMultiThreadedPublish() throws Exception {
final int numThreads = 10;
new ConfigBuilder().add("*.period", 80)
.add("test.sink.collector." + MetricsConfig.QUEUE_CAPACITY_KEY,
numThreads)
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
final MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start();
final CollectingSink sink = new CollectingSink(numThreads);
ms.registerSink("collector",
"Collector of values from all threads.", sink);
final TestSource[] sources = new TestSource[numThreads];
final Thread[] threads = new Thread[numThreads];
final String[] results = new String[numThreads];
final CyclicBarrier barrier1 = new CyclicBarrier(numThreads),
barrier2 = new CyclicBarrier(numThreads);
for (int i = 0; i < numThreads; i++) {
sources[i] = ms.register("threadSource" + i,
"A source of my threaded goodness.",
new TestSource("threadSourceRec" + i));
threads[i] = new Thread(new Runnable() {
private boolean safeAwait(int mySource, CyclicBarrier barrier) {
try {
barrier.await(2, TimeUnit.SECONDS);
} catch (InterruptedException e) {
results[mySource] = "Interrupted";
return false;
} catch (BrokenBarrierException e) {
results[mySource] = "Broken Barrier";
return false;
} catch (TimeoutException e) {
results[mySource] = "Timed out on barrier";
return false;
}
return true;
}
@Override
public void run() {
int mySource = Integer.parseInt(Thread.currentThread().getName());
if (sink.collected[mySource].get() != 0L) {
results[mySource] = "Someone else collected my metric!";
return;
}
// Wait for all the threads to come here so we can hammer
// the system at the same time
if (!safeAwait(mySource, barrier1)) return;
sources[mySource].g1.set(230);
ms.publishMetricsNow();
// Since some other thread may have snatched my metric,
// I need to wait for the threads to finish before checking.
if (!safeAwait(mySource, barrier2)) return;
if (sink.collected[mySource].get() != 230L) {
results[mySource] = "Metric not collected!";
return;
}
results[mySource] = "Passed";
}
}, "" + i);
}
for (Thread t : threads)
t.start();
for (Thread t : threads)
t.join();
assertEquals(0L, ms.droppedPubAll.value());
assertTrue(StringUtils.join("\n", Arrays.asList(results)),
Iterables.all(Arrays.asList(results), new Predicate<String>() {
@Override
public boolean apply(@Nullable String input) {
return input.equalsIgnoreCase("Passed");
}
}));
ms.stop();
ms.shutdown();
}
private static class CollectingSink implements MetricsSink {
private final AtomicLong[] collected;
public CollectingSink(int capacity) {
collected = new AtomicLong[capacity];
for (int i = 0; i < capacity; i++) {
collected[i] = new AtomicLong();
}
}
@Override
public void init(SubsetConfiguration conf) {
}
@Override
public void putMetrics(MetricsRecord record) {
final String prefix = "threadSourceRec";
if (record.name().startsWith(prefix)) {
final int recordNumber = Integer.parseInt(
record.name().substring(prefix.length()));
ArrayList<String> names = new ArrayList<String>();
for (AbstractMetric m : record.metrics()) {
if (m.name().equalsIgnoreCase("g1")) {
collected[recordNumber].set(m.value().longValue());
return;
}
names.add(m.name());
}
}
}
@Override
public void flush() {
}
}
@Test public void testHangingSink() {
new ConfigBuilder().add("*.period", 8)
.add("test.sink.test.class", TestSink.class.getName())
.add("test.sink.hanging.retry.delay", "1")
.add("test.sink.hanging.retry.backoff", "1.01")
.add("test.sink.hanging.retry.count", "0")
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start();
TestSource s = ms.register("s3", "s3 desc", new TestSource("s3rec"));
s.c1.incr();
HangingSink hanging = new HangingSink();
ms.registerSink("hanging", "Hang the sink!", hanging);
ms.publishMetricsNow();
assertEquals(1L, ms.droppedPubAll.value());
assertFalse(hanging.getInterrupted());
ms.stop();
ms.shutdown();
assertTrue(hanging.getInterrupted());
assertTrue("The sink didn't get called after its first hang " +
"for subsequent records.", hanging.getGotCalledSecondTime());
}
private static class HangingSink implements MetricsSink {
private volatile boolean interrupted;
private boolean gotCalledSecondTime;
private boolean firstTime = true;
public boolean getGotCalledSecondTime() {
return gotCalledSecondTime;
}
public boolean getInterrupted() {
return interrupted;
}
@Override
public void init(SubsetConfiguration conf) {
}
@Override
public void putMetrics(MetricsRecord record) {
// No need to hang every time, just the first record.
if (!firstTime) {
gotCalledSecondTime = true;
return;
}
firstTime = false;
try {
Thread.sleep(10 * 1000);
} catch (InterruptedException ex) {
interrupted = true;
}
}
@Override
public void flush() {
}
}
@Test public void testRegisterDups() {
MetricsSystem ms = new MetricsSystemImpl();
TestSource ts1 = new TestSource("ts1");
TestSource ts2 = new TestSource("ts2");
ms.register("ts1", "", ts1);
MetricsSource s1 = ms.getSource("ts1");
assertNotNull(s1);
// should work when metrics system is not started
ms.register("ts1", "", ts2);
MetricsSource s2 = ms.getSource("ts1");
assertNotNull(s2);
assertNotSame(s1, s2);
ms.shutdown();
}
@Test(expected=MetricsException.class) public void testRegisterDupError() {
MetricsSystem ms = new MetricsSystemImpl("test");
TestSource ts = new TestSource("ts");
ms.register(ts);
ms.register(ts);
}
@Test public void testStartStopStart() {
DefaultMetricsSystem.shutdown(); // Clear pre-existing source names.
MetricsSystemImpl ms = new MetricsSystemImpl("test");
TestSource ts = new TestSource("ts");
ms.start();
ms.register("ts", "", ts);
MetricsSourceAdapter sa = ms.getSourceAdapter("ts");
assertNotNull(sa);
assertNotNull(sa.getMBeanName());
ms.stop();
ms.shutdown();
ms.start();
sa = ms.getSourceAdapter("ts");
assertNotNull(sa);
assertNotNull(sa.getMBeanName());
ms.stop();
ms.shutdown();
}
@Test public void testUnregisterSource() {
MetricsSystem ms = new MetricsSystemImpl();
TestSource ts1 = new TestSource("ts1");
TestSource ts2 = new TestSource("ts2");
ms.register("ts1", "", ts1);
ms.register("ts2", "", ts2);
MetricsSource s1 = ms.getSource("ts1");
assertNotNull(s1);
// should work when metrics system is not started
ms.unregisterSource("ts1");
s1 = ms.getSource("ts1");
assertNull(s1);
MetricsSource s2 = ms.getSource("ts2");
assertNotNull(s2);
ms.shutdown();
}
@Test public void testRegisterSourceWithoutName() {
MetricsSystem ms = new MetricsSystemImpl();
TestSource ts = new TestSource("ts");
TestSource2 ts2 = new TestSource2("ts2");
ms.register(ts);
ms.register(ts2);
ms.init("TestMetricsSystem");
// if metrics source is registered without name,
// the class name will be used as the name
MetricsSourceAdapter sa = ((MetricsSystemImpl) ms)
.getSourceAdapter("TestSource");
assertNotNull(sa);
MetricsSourceAdapter sa2 = ((MetricsSystemImpl) ms)
.getSourceAdapter("TestSource2");
assertNotNull(sa2);
ms.shutdown();
}
private void checkMetricsRecords(List<MetricsRecord> recs) {
LOG.debug(recs);
MetricsRecord r = recs.get(0);
assertEquals("name", "s1rec", r.name());
assertEquals("tags", new MetricsTag[] {
tag(MsInfo.Context, "test"),
tag(MsInfo.Hostname, hostname)}, r.tags());
assertEquals("metrics", MetricsLists.builder("")
.addCounter(info("C1", "C1 desc"), 1L)
.addGauge(info("G1", "G1 desc"), 2L)
.addCounter(info("S1NumOps", "Number of ops for s1"), 1L)
.addGauge(info("S1AvgTime", "Average time for s1"), 0.0)
.metrics(), r.metrics());
r = recs.get(1);
assertTrue("NumActiveSinks should be 3", Iterables.contains(r.metrics(),
new MetricGaugeInt(MsInfo.NumActiveSinks, 3)));
}
@Test
public void testQSize() throws Exception {
new ConfigBuilder().add("*.period", 8)
.add("test.sink.test.class", TestSink.class.getName())
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test");
final CountDownLatch proceedSignal = new CountDownLatch(1);
final CountDownLatch reachedPutMetricSignal = new CountDownLatch(1);
ms.start();
try {
MetricsSink slowSink = mock(MetricsSink.class);
MetricsSink dataSink = mock(MetricsSink.class);
ms.registerSink("slowSink",
"The sink that will wait on putMetric", slowSink);
ms.registerSink("dataSink",
"The sink I'll use to get info about slowSink", dataSink);
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
reachedPutMetricSignal.countDown();
proceedSignal.await();
return null;
}
}).when(slowSink).putMetrics(any(MetricsRecord.class));
// trigger metric collection first time
ms.onTimerEvent();
assertTrue(reachedPutMetricSignal.await(1, TimeUnit.SECONDS));
// Now that the slow sink is still processing the first metric,
// its queue length should be 1 for the second collection.
ms.onTimerEvent();
verify(dataSink, timeout(500).times(2)).putMetrics(r1.capture());
List<MetricsRecord> mr = r1.getAllValues();
Number qSize = Iterables.find(mr.get(1).metrics(),
new Predicate<AbstractMetric>() {
@Override
public boolean apply(@Nullable AbstractMetric input) {
assert input != null;
return input.name().equals("Sink_slowSinkQsize");
}
}).value();
assertEquals(1, qSize);
} finally {
proceedSignal.countDown();
ms.stop();
}
}
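  // Note on the predicate above: for every registered sink the metrics system
  // publishes a gauge named "Sink_<sinkName>Qsize" with the sink's pending
  // queue length, which is why blocking "slowSink" makes Sink_slowSinkQsize
  // read 1 on the second collection.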
/**
   * Class to verify HADOOP-11932. Instead of reading from HTTP, it busy-waits
   * in putMetrics until the sink is closed.
*/
private static class TestClosableSink implements MetricsSink, Closeable {
    volatile boolean closed = false;
CountDownLatch collectingLatch;
public TestClosableSink(CountDownLatch collectingLatch) {
this.collectingLatch = collectingLatch;
}
@Override
public void init(SubsetConfiguration conf) {
}
@Override
public void close() throws IOException {
closed = true;
}
@Override
public void putMetrics(MetricsRecord record) {
while (!closed) {
collectingLatch.countDown();
}
}
@Override
public void flush() {
}
}
/**
* HADOOP-11932
*/
@Test(timeout = 5000)
public void testHangOnSinkRead() throws Exception {
new ConfigBuilder().add("*.period", 8)
.add("test.sink.test.class", TestSink.class.getName())
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start();
try {
CountDownLatch collectingLatch = new CountDownLatch(1);
MetricsSink sink = new TestClosableSink(collectingLatch);
ms.registerSink("closeableSink",
"The sink will be used to test closeability", sink);
// trigger metric collection first time
ms.onTimerEvent();
// Make sure that sink is collecting metrics
assertTrue(collectingLatch.await(1, TimeUnit.SECONDS));
} finally {
ms.stop();
}
}
@Metrics(context="test")
private static class TestSource {
@Metric("C1 desc") MutableCounterLong c1;
@Metric("XXX desc") MutableCounterLong xxx;
@Metric("G1 desc") MutableGaugeLong g1;
@Metric("YYY desc") MutableGaugeLong yyy;
@Metric MutableRate s1;
final MetricsRegistry registry;
TestSource(String recName) {
registry = new MetricsRegistry(recName);
}
}
@Metrics(context="test")
private static class TestSource2 {
@Metric("C1 desc") MutableCounterLong c1;
@Metric("XXX desc") MutableCounterLong xxx;
@Metric("G1 desc") MutableGaugeLong g1;
@Metric("YYY desc") MutableGaugeLong yyy;
@Metric MutableRate s1;
final MetricsRegistry registry;
TestSource2(String recName) {
registry = new MetricsRegistry(recName);
}
}
private static String getPluginUrlsAsString() {
return "file:metrics2-test-plugin.jar";
}
}
| 19,853 | 33.290155 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMetricsRegistry.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.lib;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import static org.apache.hadoop.metrics2.lib.Interns.*;
import static org.apache.hadoop.test.MetricsAsserts.*;
/**
* Test the metric registry class
*/
public class TestMetricsRegistry {
/**
* Test various factory methods
*/
@Test public void testNewMetrics() {
final MetricsRegistry r = new MetricsRegistry("test");
r.newCounter("c1", "c1 desc", 1);
r.newCounter("c2", "c2 desc", 2L);
r.newGauge("g1", "g1 desc", 3);
r.newGauge("g2", "g2 desc", 4L);
r.newStat("s1", "s1 desc", "ops", "time");
assertEquals("num metrics in registry", 5, r.metrics().size());
assertTrue("c1 found", r.get("c1") instanceof MutableCounterInt);
assertTrue("c2 found", r.get("c2") instanceof MutableCounterLong);
assertTrue("g1 found", r.get("g1") instanceof MutableGaugeInt);
assertTrue("g2 found", r.get("g2") instanceof MutableGaugeLong);
assertTrue("s1 found", r.get("s1") instanceof MutableStat);
expectMetricsException("Metric name c1 already exists", new Runnable() {
@Override
public void run() { r.newCounter("c1", "test dup", 0); }
});
}
/**
* Test adding metrics with whitespace in the name
*/
@Test
public void testMetricsRegistryIllegalMetricNames() {
final MetricsRegistry r = new MetricsRegistry("test");
// Fill up with some basics
r.newCounter("c1", "c1 desc", 1);
r.newGauge("g1", "g1 desc", 1);
r.newQuantiles("q1", "q1 desc", "q1 name", "q1 val type", 1);
// Add some illegal names
expectMetricsException("Metric name 'badcount 2' contains "+
"illegal whitespace character", new Runnable() {
@Override
public void run() { r.newCounter("badcount 2", "c2 desc", 2); }
});
expectMetricsException("Metric name 'badcount3 ' contains "+
"illegal whitespace character", new Runnable() {
@Override
public void run() { r.newCounter("badcount3 ", "c3 desc", 3); }
});
expectMetricsException("Metric name ' badcount4' contains "+
"illegal whitespace character", new Runnable() {
@Override
public void run() { r.newCounter(" badcount4", "c4 desc", 4); }
});
expectMetricsException("Metric name 'withtab5 ' contains "+
"illegal whitespace character", new Runnable() {
@Override
public void run() { r.newCounter("withtab5 ", "c5 desc", 5); }
});
expectMetricsException("Metric name 'withnewline6\n' contains "+
"illegal whitespace character", new Runnable() {
@Override
public void run() { r.newCounter("withnewline6\n", "c6 desc", 6); }
});
// Final validation
assertEquals("num metrics in registry", 3, r.metrics().size());
}
/**
* Test the add by name method
*/
@Test public void testAddByName() {
MetricsRecordBuilder rb = mockMetricsRecordBuilder();
final MetricsRegistry r = new MetricsRegistry("test");
r.add("s1", 42);
r.get("s1").snapshot(rb);
verify(rb).addCounter(info("S1NumOps", "Number of ops for s1"), 1L);
verify(rb).addGauge(info("S1AvgTime", "Average time for s1"), 42.0);
r.newCounter("c1", "test add", 1);
r.newGauge("g1", "test add", 1);
expectMetricsException("Unsupported add", new Runnable() {
@Override
public void run() { r.add("c1", 42); }
});
expectMetricsException("Unsupported add", new Runnable() {
@Override
public void run() { r.add("g1", 42); }
});
}
@Ignore
private void expectMetricsException(String prefix, Runnable fun) {
try {
fun.run();
}
catch (MetricsException e) {
assertTrue("expected exception", e.getMessage().startsWith(prefix));
return;
}
fail("should've thrown '"+ prefix +"...'");
}
}
| 4,805 | 34.080292 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestInterns.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.lib;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsTag;
import static org.apache.hadoop.metrics2.lib.Interns.*;
public class TestInterns {
@Test public void testInfo() {
MetricsInfo info = info("m", "m desc");
assertSame("same info", info, info("m", "m desc"));
}
@Test public void testTag() {
MetricsTag tag = tag("t", "t desc", "t value");
assertSame("same tag", tag, tag("t", "t desc", "t value"));
}
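  // Why interning matters: equal arguments return the *same* immutable
  // instance (assertSame above), so hot paths can reuse info/tag objects and
  // compare them by identity, e.g. info("m", "m desc") == info("m", "m desc")
  // holds while the entry is cached. The caches are bounded, though; the
  // overflow tests below pin down exactly when that guarantee lapses.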
@Test public void testInfoOverflow() {
MetricsInfo i0 = info("m0", "m desc");
for (int i = 0; i < MAX_INFO_NAMES + 1; ++i) {
info("m"+ i, "m desc");
if (i < MAX_INFO_NAMES) {
assertSame("m0 is still there", i0, info("m0", "m desc"));
}
}
assertNotSame("m0 is gone", i0, info("m0", "m desc"));
MetricsInfo i1 = info("m1", "m desc");
for (int i = 0; i < MAX_INFO_DESCS; ++i) {
info("m1", "m desc"+ i);
if (i < MAX_INFO_DESCS - 1) {
assertSame("i1 is still there", i1, info("m1", "m desc"));
}
}
assertNotSame("i1 is gone", i1, info("m1", "m desc"));
}
@Test public void testTagOverflow() {
MetricsTag t0 = tag("t0", "t desc", "t value");
for (int i = 0; i < MAX_TAG_NAMES + 1; ++i) {
tag("t"+ i, "t desc", "t value");
if (i < MAX_TAG_NAMES) {
assertSame("t0 still there", t0, tag("t0", "t desc", "t value"));
}
}
assertNotSame("t0 is gone", t0, tag("t0", "t desc", "t value"));
MetricsTag t1 = tag("t1", "t desc", "t value");
for (int i = 0; i < MAX_TAG_VALUES; ++i) {
tag("t1", "t desc", "t value"+ i);
      if (i < MAX_TAG_VALUES - 1) {
assertSame("t1 is still there", t1, tag("t1", "t desc", "t value"));
}
}
assertNotSame("t1 is gone", t1, tag("t1", "t desc", "t value"));
}
}
| 2,728 | 33.1125 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMetricsAnnotations.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.lib;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metric.*;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.impl.MsInfo;
import static org.apache.hadoop.metrics2.lib.Interns.*;
import static org.apache.hadoop.test.MetricsAsserts.*;
public class TestMetricsAnnotations {
static class MyMetrics {
@Metric MutableCounterInt c1;
@Metric({"Counter2", "Counter2 desc"}) MutableCounterLong c2;
@Metric MutableGaugeInt g1, g2;
@Metric("g3 desc") MutableGaugeLong g3;
@Metric MutableRate r1;
@Metric MutableStat s1;
@Metric MutableRates rs1;
}
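  // Note on defaults: an un-annotated name is derived from the capitalized
  // field name and doubles as the description, which is why the assertions
  // below expect info("C1", "C1") for the bare @Metric field c1 but
  // info("Counter2", "Counter2 desc") for the explicitly annotated c2.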
@Test public void testFields() {
MyMetrics metrics = new MyMetrics();
MetricsSource source = MetricsAnnotations.makeSource(metrics);
metrics.c1.incr();
metrics.c2.incr();
metrics.g1.incr();
metrics.g2.incr();
metrics.g3.incr();
metrics.r1.add(1);
metrics.s1.add(1);
metrics.rs1.add("rs1", 1);
MetricsRecordBuilder rb = getMetrics(source);
verify(rb).addCounter(info("C1", "C1"), 1);
verify(rb).addCounter(info("Counter2", "Counter2 desc"), 1L);
verify(rb).addGauge(info("G1", "G1"), 1);
verify(rb).addGauge(info("G2", "G2"), 1);
verify(rb).addGauge(info("G3", "g3 desc"), 1L);
verify(rb).addCounter(info("R1NumOps", "Number of ops for r1"), 1L);
verify(rb).addGauge(info("R1AvgTime", "Average time for r1"), 1.0);
verify(rb).addCounter(info("S1NumOps", "Number of ops for s1"), 1L);
verify(rb).addGauge(info("S1AvgTime", "Average time for s1"), 1.0);
verify(rb).addCounter(info("Rs1NumOps", "Number of ops for rs1"), 1L);
verify(rb).addGauge(info("Rs1AvgTime", "Average time for rs1"), 1.0);
}
static class BadMetrics {
@Metric Integer i0;
}
@Test(expected=MetricsException.class) public void testBadFields() {
MetricsAnnotations.makeSource(new BadMetrics());
}
static class MyMetrics2 {
@Metric int getG1() { return 1; }
@Metric long getG2() { return 2; }
@Metric float getG3() { return 3; }
@Metric double getG4() { return 4; }
@Metric(type=Type.COUNTER) int getC1() { return 1; }
@Metric(type=Type.COUNTER) long getC2() { return 2; }
@Metric(type=Type.TAG) String getT1() { return "t1"; }
}
@Test public void testMethods() {
MyMetrics2 metrics = new MyMetrics2();
MetricsSource source = MetricsAnnotations.makeSource(metrics);
MetricsRecordBuilder rb = getMetrics(source);
verify(rb).addGauge(info("G1", "G1"), 1);
verify(rb).addGauge(info("G2", "G2"), 2L);
verify(rb).addGauge(info("G3", "G3"), 3.0f);
verify(rb).addGauge(info("G4", "G4"), 4.0);
verify(rb).addCounter(info("C1", "C1"), 1);
verify(rb).addCounter(info("C2", "C2"), 2L);
verify(rb).tag(info("T1", "T1"), "t1");
}
static class BadMetrics2 {
@Metric int foo(int i) { return i; }
}
@Test(expected=IllegalArgumentException.class)
public void testBadMethodWithArgs() {
MetricsAnnotations.makeSource(new BadMetrics2());
}
static class BadMetrics3 {
@Metric boolean foo() { return true; }
}
@Test(expected=MetricsException.class)
public void testBadMethodReturnType() {
MetricsAnnotations.makeSource(new BadMetrics3());
}
@Metrics(about="My metrics", context="foo")
static class MyMetrics3 {
@Metric int getG1() { return 1; }
}
@Test public void testClasses() {
MetricsRecordBuilder rb = getMetrics(
MetricsAnnotations.makeSource(new MyMetrics3()));
MetricsCollector collector = rb.parent();
verify(collector).addRecord(info("MyMetrics3", "My metrics"));
verify(rb).add(tag(MsInfo.Context, "foo"));
}
static class HybridMetrics implements MetricsSource {
final MetricsRegistry registry = new MetricsRegistry("HybridMetrics")
.setContext("hybrid");
@Metric("C0 desc") MutableCounterInt C0;
@Metric int getG0() { return 0; }
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
collector.addRecord("foo")
.setContext("foocontext")
.addCounter(info("C1", "C1 desc"), 1)
.endRecord()
.addRecord("bar")
.setContext("barcontext")
.addGauge(info("G1", "G1 desc"), 1);
registry.snapshot(collector.addRecord(registry.info()), all);
}
}
@Test public void testHybrid() {
HybridMetrics metrics = new HybridMetrics();
MetricsSource source = MetricsAnnotations.makeSource(metrics);
assertSame(metrics, source);
metrics.C0.incr();
MetricsRecordBuilder rb = getMetrics(source);
MetricsCollector collector = rb.parent();
verify(collector).addRecord("foo");
verify(collector).addRecord("bar");
verify(collector).addRecord(info("HybridMetrics", "HybridMetrics"));
verify(rb).setContext("foocontext");
verify(rb).addCounter(info("C1", "C1 desc"), 1);
verify(rb).setContext("barcontext");
verify(rb).addGauge(info("G1", "G1 desc"), 1);
verify(rb).add(tag(MsInfo.Context, "hybrid"));
verify(rb).addCounter(info("C0", "C0 desc"), 1);
verify(rb).addGauge(info("G0", "G0"), 0);
}
@Metrics(context="hybrid")
static class BadHybridMetrics implements MetricsSource {
@Metric MutableCounterInt c1;
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
collector.addRecord("foo");
}
}
@Test(expected=MetricsException.class) public void testBadHybrid() {
MetricsAnnotations.makeSource(new BadHybridMetrics());
}
static class EmptyMetrics {
int foo;
}
@Test(expected=MetricsException.class) public void testEmptyMetrics() {
MetricsAnnotations.makeSource(new EmptyMetrics());
}
}
| 6,920 | 33.093596 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestUniqNames.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.lib;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestUniqNames {
@Test public void testCommonCases() {
UniqueNames u = new UniqueNames();
assertEquals("foo", u.uniqueName("foo"));
assertEquals("foo-1", u.uniqueName("foo"));
}
@Test public void testCollisions() {
UniqueNames u = new UniqueNames();
u.uniqueName("foo");
assertEquals("foo-1", u.uniqueName("foo-1"));
assertEquals("foo-2", u.uniqueName("foo"));
assertEquals("foo-1-1", u.uniqueName("foo-1"));
assertEquals("foo-2-1", u.uniqueName("foo-2"));
}
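  // A hedged extra sketch (not part of the original suite): each base name
  // keeps its own counter, so interleaved names do not disturb one another.
  @Test public void testIndependentBaseNames() {
    UniqueNames u = new UniqueNames();
    assertEquals("foo", u.uniqueName("foo"));
    assertEquals("bar", u.uniqueName("bar"));
    assertEquals("foo-1", u.uniqueName("foo"));
    assertEquals("bar-1", u.uniqueName("bar"));
  }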
}
| 1,427 | 32.209302 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.lib;
import static org.apache.hadoop.metrics2.lib.Interns.info;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.mockMetricsRecordBuilder;
import static org.mockito.AdditionalMatchers.eq;
import static org.mockito.AdditionalMatchers.geq;
import static org.mockito.AdditionalMatchers.leq;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.util.Quantile;
import org.junit.Test;
/**
* Test metrics record builder interface and mutable metrics
*/
public class TestMutableMetrics {
private final double EPSILON = 1e-42;
/**
* Test the snapshot method
*/
@Test public void testSnapshot() {
MetricsRecordBuilder mb = mockMetricsRecordBuilder();
MetricsRegistry registry = new MetricsRegistry("test");
registry.newCounter("c1", "int counter", 1);
registry.newCounter("c2", "long counter", 2L);
registry.newGauge("g1", "int gauge", 3);
registry.newGauge("g2", "long gauge", 4L);
registry.newStat("s1", "stat", "Ops", "Time", true).add(0);
registry.newRate("s2", "stat", false).add(0);
registry.snapshot(mb, true);
MutableStat s2 = (MutableStat) registry.get("s2");
s2.snapshot(mb, true); // should get the same back.
s2.add(1);
s2.snapshot(mb, true); // should get new interval values back
verify(mb).addCounter(info("c1", "int counter"), 1);
verify(mb).addCounter(info("c2", "long counter"), 2L);
verify(mb).addGauge(info("g1", "int gauge"), 3);
verify(mb).addGauge(info("g2", "long gauge"), 4L);
verify(mb).addCounter(info("S1NumOps", "Number of ops for stat"), 1L);
verify(mb).addGauge(eq(info("S1AvgTime", "Average time for stat")),
eq(0.0, EPSILON));
verify(mb).addGauge(eq(info("S1StdevTime",
"Standard deviation of time for stat")),
eq(0.0, EPSILON));
verify(mb).addGauge(eq(info("S1IMinTime",
"Interval min time for stat")),
eq(0.0, EPSILON));
verify(mb).addGauge(eq(info("S1IMaxTime",
"Interval max time for stat")),
eq(0.0, EPSILON));
verify(mb).addGauge(eq(info("S1MinTime","Min time for stat")),
eq(0.0, EPSILON));
verify(mb).addGauge(eq(info("S1MaxTime","Max time for stat")),
eq(0.0, EPSILON));
verify(mb, times(2))
.addCounter(info("S2NumOps", "Number of ops for stat"), 1L);
verify(mb, times(2)).addGauge(eq(info("S2AvgTime",
"Average time for stat")),
eq(0.0, EPSILON));
verify(mb).addCounter(info("S2NumOps", "Number of ops for stat"), 2L);
verify(mb).addGauge(eq(info("S2AvgTime", "Average time for stat")),
eq(1.0, EPSILON));
}
interface TestProtocol {
void foo();
void bar();
}
@Test public void testMutableRates() {
MetricsRecordBuilder rb = mockMetricsRecordBuilder();
MetricsRegistry registry = new MetricsRegistry("test");
MutableRates rates = new MutableRates(registry);
rates.init(TestProtocol.class);
registry.snapshot(rb, false);
assertCounter("FooNumOps", 0L, rb);
assertGauge("FooAvgTime", 0.0, rb);
assertCounter("BarNumOps", 0L, rb);
assertGauge("BarAvgTime", 0.0, rb);
}
/**
* Ensure that quantile estimates from {@link MutableQuantiles} are within
* specified error bounds.
*/
@Test(timeout = 30000)
public void testMutableQuantilesError() throws Exception {
MetricsRecordBuilder mb = mockMetricsRecordBuilder();
MetricsRegistry registry = new MetricsRegistry("test");
// Use a 5s rollover period
MutableQuantiles quantiles = registry.newQuantiles("foo", "stat", "Ops",
"Latency", 5);
// Push some values in and wait for it to publish
long start = System.nanoTime() / 1000000;
for (long i = 1; i <= 1000; i++) {
quantiles.add(i);
quantiles.add(1001 - i);
}
long end = System.nanoTime() / 1000000;
    Thread.sleep(Math.max(0, 6000 - (end - start)));
registry.snapshot(mb, false);
// Print out the snapshot
Map<Quantile, Long> previousSnapshot = quantiles.previousSnapshot;
for (Entry<Quantile, Long> item : previousSnapshot.entrySet()) {
System.out.println(String.format("Quantile %.2f has value %d",
item.getKey().quantile, item.getValue()));
}
// Verify the results are within our requirements
verify(mb).addGauge(
info("FooNumOps", "Number of ops for stat with 5s interval"),
(long) 2000);
Quantile[] quants = MutableQuantiles.quantiles;
String name = "Foo%dthPercentileLatency";
String desc = "%d percentile latency with 5 second interval for stat";
for (Quantile q : quants) {
int percentile = (int) (100 * q.quantile);
int error = (int) (1000 * q.error);
String n = String.format(name, percentile);
String d = String.format(desc, percentile);
long expected = (long) (q.quantile * 1000);
verify(mb).addGauge(eq(info(n, d)), leq(expected + error));
verify(mb).addGauge(eq(info(n, d)), geq(expected - error));
}
}
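  // A worked instance of the bound check above, assuming the usual
  // MutableQuantiles target of the median (q.quantile = 0.50) at 5% error:
  // expected = 0.50 * 1000 = 500 and error = 0.050 * 1000 = 50, so the
  // published 50th-percentile gauge must land in [450, 550] for the
  // 1..1000 stream pushed in twice.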
/**
* Test that {@link MutableQuantiles} rolls the window over at the specified
* interval.
*/
@Test(timeout = 30000)
public void testMutableQuantilesRollover() throws Exception {
MetricsRecordBuilder mb = mockMetricsRecordBuilder();
MetricsRegistry registry = new MetricsRegistry("test");
// Use a 5s rollover period
MutableQuantiles quantiles = registry.newQuantiles("foo", "stat", "Ops",
"Latency", 5);
Quantile[] quants = MutableQuantiles.quantiles;
String name = "Foo%dthPercentileLatency";
String desc = "%d percentile latency with 5 second interval for stat";
// Push values for three intervals
long start = System.nanoTime() / 1000000;
for (int i = 1; i <= 3; i++) {
// Insert the values
for (long j = 1; j <= 1000; j++) {
quantiles.add(i);
}
// Sleep until 1s after the next 5s interval, to let the metrics
// roll over
long sleep = (start + (5000 * i) + 1000) - (System.nanoTime() / 1000000);
      Thread.sleep(Math.max(0, sleep));
      // Verify that the window reset and check it has the values we pushed in
registry.snapshot(mb, false);
for (Quantile q : quants) {
int percentile = (int) (100 * q.quantile);
String n = String.format(name, percentile);
String d = String.format(desc, percentile);
verify(mb).addGauge(info(n, d), (long) i);
}
}
// Verify the metrics were added the right number of times
verify(mb, times(3)).addGauge(
info("FooNumOps", "Number of ops for stat with 5s interval"),
(long) 1000);
for (Quantile q : quants) {
int percentile = (int) (100 * q.quantile);
String n = String.format(name, percentile);
String d = String.format(desc, percentile);
verify(mb, times(3)).addGauge(eq(info(n, d)), anyLong());
}
}
/**
* Test that {@link MutableQuantiles} rolls over correctly even if no items
* have been added to the window
*/
@Test(timeout = 30000)
public void testMutableQuantilesEmptyRollover() throws Exception {
MetricsRecordBuilder mb = mockMetricsRecordBuilder();
MetricsRegistry registry = new MetricsRegistry("test");
// Use a 5s rollover period
MutableQuantiles quantiles = registry.newQuantiles("foo", "stat", "Ops",
"Latency", 5);
// Check it initially
quantiles.snapshot(mb, true);
verify(mb).addGauge(
info("FooNumOps", "Number of ops for stat with 5s interval"), (long) 0);
Thread.sleep(6000);
quantiles.snapshot(mb, false);
verify(mb, times(2)).addGauge(
info("FooNumOps", "Number of ops for stat with 5s interval"), (long) 0);
}
}
| 9,098 | 37.392405 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.source;
import org.junit.Test;
import static org.mockito.Mockito.*;
import static org.apache.hadoop.test.MetricsAsserts.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.util.JvmPauseMonitor;
import static org.apache.hadoop.metrics2.source.JvmMetricsInfo.*;
import static org.apache.hadoop.metrics2.impl.MsInfo.*;
public class TestJvmMetrics {
@Test public void testPresence() {
JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(new Configuration());
JvmMetrics jvmMetrics = new JvmMetrics("test", "test");
jvmMetrics.setPauseMonitor(pauseMonitor);
MetricsRecordBuilder rb = getMetrics(jvmMetrics);
MetricsCollector mc = rb.parent();
verify(mc).addRecord(JvmMetrics);
verify(rb).tag(ProcessName, "test");
verify(rb).tag(SessionId, "test");
for (JvmMetricsInfo info : JvmMetricsInfo.values()) {
if (info.name().startsWith("Mem"))
verify(rb).addGauge(eq(info), anyFloat());
else if (info.name().startsWith("Gc"))
verify(rb).addCounter(eq(info), anyLong());
else if (info.name().startsWith("Threads"))
verify(rb).addGauge(eq(info), anyInt());
else if (info.name().startsWith("Log"))
verify(rb).addCounter(eq(info), anyLong());
}
}
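  // A hedged sketch of real-world wiring (names here are illustrative): a
  // daemon typically registers this source once at startup, e.g.
  //   JvmMetrics jm = JvmMetrics.create("NameNode", sessionId,
  //       DefaultMetricsSystem.instance());
  //   jm.setPauseMonitor(pauseMonitor);
  // after which the Mem*/Gc*/Threads*/Log* metrics verified above are
  // published on every collection.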
}
| 2,211 | 37.137931 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.junit.Test;
import org.mockito.InOrder;
public class TestContentSummary {
// check the empty constructor correctly initialises the object
@Test
public void testConstructorEmpty() {
ContentSummary contentSummary = new ContentSummary.Builder().build();
assertEquals("getLength", 0, contentSummary.getLength());
assertEquals("getFileCount", 0, contentSummary.getFileCount());
assertEquals("getDirectoryCount", 0, contentSummary.getDirectoryCount());
assertEquals("getQuota", -1, contentSummary.getQuota());
assertEquals("getSpaceConsumed", 0, contentSummary.getSpaceConsumed());
assertEquals("getSpaceQuota", -1, contentSummary.getSpaceQuota());
}
// check the full constructor with quota information
@Test
public void testConstructorWithQuota() {
long length = 11111;
long fileCount = 22222;
long directoryCount = 33333;
long quota = 44444;
long spaceConsumed = 55555;
long spaceQuota = 66666;
ContentSummary contentSummary = new ContentSummary.Builder().length(length).
fileCount(fileCount).directoryCount(directoryCount).quota(quota).
spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
assertEquals("getLength", length, contentSummary.getLength());
assertEquals("getFileCount", fileCount, contentSummary.getFileCount());
assertEquals("getDirectoryCount", directoryCount,
contentSummary.getDirectoryCount());
assertEquals("getQuota", quota, contentSummary.getQuota());
assertEquals("getSpaceConsumed", spaceConsumed,
contentSummary.getSpaceConsumed());
assertEquals("getSpaceQuota", spaceQuota, contentSummary.getSpaceQuota());
}
  // check the constructor without quota information
@Test
public void testConstructorNoQuota() {
long length = 11111;
long fileCount = 22222;
long directoryCount = 33333;
ContentSummary contentSummary = new ContentSummary.Builder().length(length).
fileCount(fileCount).directoryCount(directoryCount).
spaceConsumed(length).build();
assertEquals("getLength", length, contentSummary.getLength());
assertEquals("getFileCount", fileCount, contentSummary.getFileCount());
assertEquals("getDirectoryCount", directoryCount,
contentSummary.getDirectoryCount());
assertEquals("getQuota", -1, contentSummary.getQuota());
assertEquals("getSpaceConsumed", length, contentSummary.getSpaceConsumed());
assertEquals("getSpaceQuota", -1, contentSummary.getSpaceQuota());
}
// check the write method
@Test
public void testWrite() throws IOException {
long length = 11111;
long fileCount = 22222;
long directoryCount = 33333;
long quota = 44444;
long spaceConsumed = 55555;
long spaceQuota = 66666;
ContentSummary contentSummary = new ContentSummary.Builder().length(length).
fileCount(fileCount).directoryCount(directoryCount).quota(quota).
spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
DataOutput out = mock(DataOutput.class);
InOrder inOrder = inOrder(out);
contentSummary.write(out);
inOrder.verify(out).writeLong(length);
inOrder.verify(out).writeLong(fileCount);
inOrder.verify(out).writeLong(directoryCount);
inOrder.verify(out).writeLong(quota);
inOrder.verify(out).writeLong(spaceConsumed);
inOrder.verify(out).writeLong(spaceQuota);
}
// check the readFields method
@Test
public void testReadFields() throws IOException {
long length = 11111;
long fileCount = 22222;
long directoryCount = 33333;
long quota = 44444;
long spaceConsumed = 55555;
long spaceQuota = 66666;
ContentSummary contentSummary = new ContentSummary.Builder().build();
DataInput in = mock(DataInput.class);
when(in.readLong()).thenReturn(length).thenReturn(fileCount)
.thenReturn(directoryCount).thenReturn(quota).thenReturn(spaceConsumed)
.thenReturn(spaceQuota);
contentSummary.readFields(in);
assertEquals("getLength", length, contentSummary.getLength());
assertEquals("getFileCount", fileCount, contentSummary.getFileCount());
assertEquals("getDirectoryCount", directoryCount,
contentSummary.getDirectoryCount());
assertEquals("getQuota", quota, contentSummary.getQuota());
assertEquals("getSpaceConsumed", spaceConsumed,
contentSummary.getSpaceConsumed());
assertEquals("getSpaceQuota", spaceQuota, contentSummary.getSpaceQuota());
}
// check the header with quotas
@Test
public void testGetHeaderWithQuota() {
String header = " QUOTA REM_QUOTA SPACE_QUOTA "
+ "REM_SPACE_QUOTA DIR_COUNT FILE_COUNT CONTENT_SIZE ";
assertEquals(header, ContentSummary.getHeader(true));
}
// check the header without quotas
@Test
public void testGetHeaderNoQuota() {
String header = " DIR_COUNT FILE_COUNT CONTENT_SIZE ";
assertEquals(header, ContentSummary.getHeader(false));
}
// check the toString method with quotas
@Test
public void testToStringWithQuota() {
long length = 11111;
long fileCount = 22222;
long directoryCount = 33333;
long quota = 44444;
long spaceConsumed = 55555;
long spaceQuota = 66665;
ContentSummary contentSummary = new ContentSummary.Builder().length(length).
fileCount(fileCount).directoryCount(directoryCount).quota(quota).
spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
String expected = " 44444 -11111 66665 11110"
+ " 33333 22222 11111 ";
assertEquals(expected, contentSummary.toString(true));
}
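  // The expected string above follows directly from the summary arithmetic:
  // REM_QUOTA = quota - (fileCount + directoryCount)
  //           = 44444 - (22222 + 33333) = -11111, and
  // REM_SPACE_QUOTA = spaceQuota - spaceConsumed = 66665 - 55555 = 11110.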
  // check the toString method when no quota is set
@Test
public void testToStringNoQuota() {
long length = 11111;
long fileCount = 22222;
long directoryCount = 33333;
ContentSummary contentSummary = new ContentSummary.Builder().length(length).
fileCount(fileCount).directoryCount(directoryCount).build();
String expected = " none inf none"
+ " inf 33333 22222 11111 ";
assertEquals(expected, contentSummary.toString(true));
}
  // check the toString method without displaying quotas
@Test
public void testToStringNoShowQuota() {
long length = 11111;
long fileCount = 22222;
long directoryCount = 33333;
long quota = 44444;
long spaceConsumed = 55555;
long spaceQuota = 66665;
ContentSummary contentSummary = new ContentSummary.Builder().length(length).
fileCount(fileCount).directoryCount(directoryCount).quota(quota).
spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
String expected = " 33333 22222 11111 ";
assertEquals(expected, contentSummary.toString(false));
}
// check the toString method (defaults to with quotas)
@Test
public void testToString() {
long length = 11111;
long fileCount = 22222;
long directoryCount = 33333;
long quota = 44444;
long spaceConsumed = 55555;
long spaceQuota = 66665;
ContentSummary contentSummary = new ContentSummary.Builder().length(length).
fileCount(fileCount).directoryCount(directoryCount).quota(quota).
spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
String expected = " 44444 -11111 66665"
+ " 11110 33333 22222 11111 ";
assertEquals(expected, contentSummary.toString());
}
  // check the human-readable toString method with quotas
@Test
public void testToStringHumanWithQuota() {
long length = Long.MAX_VALUE;
long fileCount = 222222222;
long directoryCount = 33333;
long quota = 222256578;
long spaceConsumed = 1073741825;
long spaceQuota = 1;
ContentSummary contentSummary = new ContentSummary.Builder().length(length).
fileCount(fileCount).directoryCount(directoryCount).quota(quota).
spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
String expected = " 212.0 M 1023 1 "
+ " -1 G 32.6 K 211.9 M 8.0 E ";
assertEquals(expected, contentSummary.toString(true, true));
}
  // check the human-readable toString method without displaying quotas
@Test
public void testToStringHumanNoShowQuota() {
long length = Long.MAX_VALUE;
long fileCount = 222222222;
long directoryCount = 33333;
long quota = 222256578;
long spaceConsumed = 55555;
long spaceQuota = Long.MAX_VALUE;
ContentSummary contentSummary = new ContentSummary.Builder().length(length).
fileCount(fileCount).directoryCount(directoryCount).quota(quota).
spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
String expected = " 32.6 K 211.9 M 8.0 E ";
assertEquals(expected, contentSummary.toString(false, true));
}
}
| 9,908 | 37.55642 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.StringReader;
import java.util.Random;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Shell;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestDFVariations {
private static final String TEST_ROOT_DIR =
System.getProperty("test.build.data","build/test/data") + "/TestDFVariations";
private static File test_root = null;
@Before
public void setup() throws IOException {
test_root = new File(TEST_ROOT_DIR);
test_root.mkdirs();
}
@After
public void after() throws IOException {
FileUtil.setWritable(test_root, true);
FileUtil.fullyDelete(test_root);
assertTrue(!test_root.exists());
}
public static class XXDF extends DF {
public XXDF() throws IOException {
super(test_root, 0L);
}
@Override
protected String[] getExecString() {
return new String[] { "echo", "IGNORE\n",
"/dev/sda3", "453115160", "53037920", "400077240", "11%", "/foo/bar\n"};
}
}
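  // XXDF above stubs out getExecString() so DF parses a canned, echoed line
  // instead of shelling out to a real df binary, which keeps the parsing
  // assertions below deterministic across machines.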
  @Test(timeout=5000)
  public void testMount() throws Exception {
XXDF df = new XXDF();
String expectedMount =
Shell.WINDOWS ? df.getDirPath().substring(0, 2) : "/foo/bar";
assertEquals("Invalid mount point",
expectedMount, df.getMount());
}
@Test(timeout=5000)
public void testFileSystem() throws Exception {
XXDF df = new XXDF();
String expectedFileSystem =
Shell.WINDOWS ? df.getDirPath().substring(0, 2) : "/dev/sda3";
assertEquals("Invalid filesystem",
expectedFileSystem, df.getFilesystem());
}
@Test(timeout=5000)
public void testDFInvalidPath() throws Exception {
// Generate a path that doesn't exist
    Random random = new Random(0xDEADBEEFL);
File file = null;
byte[] bytes = new byte[64];
while (file == null) {
random.nextBytes(bytes);
final String invalid = new String("/" + bytes);
final File invalidFile = new File(invalid);
if (!invalidFile.exists()) {
file = invalidFile;
}
}
    DF df = new DF(file, 0L);
try {
df.getMount();
} catch (FileNotFoundException e) {
// expected, since path does not exist
GenericTestUtils.assertExceptionContains(file.getName(), e);
}
}
@Test(timeout=5000)
public void testDFMalformedOutput() throws Exception {
DF df = new DF(new File("/"), 0l);
BufferedReader reader = new BufferedReader(new StringReader(
"Filesystem 1K-blocks Used Available Use% Mounted on\n" +
"/dev/sda5 19222656 10597036 7649060 59% /"));
df.parseExecResult(reader);
df.parseOutput();
reader = new BufferedReader(new StringReader(
"Filesystem 1K-blocks Used Available Use% Mounted on"));
df.parseExecResult(reader);
try {
df.parseOutput();
fail("Expected exception with missing line!");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains(
"Fewer lines of output than expected", e);
System.out.println(e.toString());
}
reader = new BufferedReader(new StringReader(
"Filesystem 1K-blocks Used Available Use% Mounted on\n" +
" "));
df.parseExecResult(reader);
try {
df.parseOutput();
fail("Expected exception with empty line!");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Unexpected empty line", e);
System.out.println(e.toString());
}
reader = new BufferedReader(new StringReader(
"Filesystem 1K-blocks Used Available Use% Mounted on\n" +
" 19222656 10597036 7649060 59% /"));
df.parseExecResult(reader);
try {
df.parseOutput();
fail("Expected exception with missing field!");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Could not parse line: ", e);
System.out.println(e.toString());
}
}
@Test(timeout=5000)
public void testGetMountCurrentDirectory() throws Exception {
File currentDirectory = new File(".");
String workingDir = currentDirectory.getAbsoluteFile().getCanonicalPath();
DF df = new DF(new File(workingDir), 0L);
String mountPath = df.getMount();
File mountDir = new File(mountPath);
assertTrue("Mount dir ["+mountDir.getAbsolutePath()+"] should exist.",
mountDir.exists());
assertTrue("Mount dir ["+mountDir.getAbsolutePath()+"] should be directory.",
mountDir.isDirectory());
assertTrue("Working dir ["+workingDir+"] should start with ["+mountPath+"].",
workingDir.startsWith(mountPath));
}
}
| 5,673 | 32.376471 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestWrapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.DataInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Options.CreateOpts.BlockSize;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.Progressable;
import org.junit.Assert;
/**
* Helper class for unit tests.
*/
public final class FileSystemTestWrapper extends FSTestWrapper {
private final FileSystem fs;
public FileSystemTestWrapper(FileSystem fs) {
this(fs, null);
}
public FileSystemTestWrapper(FileSystem fs, String rootDir) {
super(rootDir);
this.fs = fs;
}
public FSTestWrapper getLocalFSWrapper()
throws IOException {
return new FileSystemTestWrapper(FileSystem.getLocal(fs.getConf()));
}
public Path getDefaultWorkingDirectory() throws IOException {
return getTestRootPath("/user/" + System.getProperty("user.name"))
.makeQualified(fs.getUri(),
fs.getWorkingDirectory());
}
/*
   * Create a file with numBlocks blocks, each of size blockSize.
*/
public long createFile(Path path, int numBlocks, CreateOpts... options)
throws IOException {
BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
: DEFAULT_BLOCK_SIZE;
FSDataOutputStream out =
create(path, EnumSet.of(CreateFlag.CREATE), options);
byte[] data = getFileData(numBlocks, blockSize);
out.write(data, 0, data.length);
out.close();
return data.length;
}
public long createFile(Path path, int numBlocks, int blockSize)
throws IOException {
return createFile(path, numBlocks, CreateOpts.blockSize(blockSize),
CreateOpts.createParent());
}
public long createFile(Path path) throws IOException {
return createFile(path, DEFAULT_NUM_BLOCKS, CreateOpts.createParent());
}
public long createFile(String name) throws IOException {
Path path = getTestRootPath(name);
return createFile(path);
}
public long createFileNonRecursive(String name) throws IOException {
Path path = getTestRootPath(name);
return createFileNonRecursive(path);
}
public long createFileNonRecursive(Path path) throws IOException {
return createFile(path, DEFAULT_NUM_BLOCKS, CreateOpts.donotCreateParent());
}
public void appendToFile(Path path, int numBlocks, CreateOpts... options)
throws IOException {
BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
: DEFAULT_BLOCK_SIZE;
    FSDataOutputStream out = fs.append(path);
byte[] data = getFileData(numBlocks, blockSize);
out.write(data, 0, data.length);
out.close();
}
public boolean exists(Path p) throws IOException {
return fs.exists(p);
}
public boolean isFile(Path p) throws IOException {
try {
return fs.getFileStatus(p).isFile();
} catch (FileNotFoundException e) {
return false;
}
}
public boolean isDir(Path p) throws IOException {
try {
return fs.getFileStatus(p).isDirectory();
} catch (FileNotFoundException e) {
return false;
}
}
public boolean isSymlink(Path p) throws IOException {
try {
return fs.getFileLinkStatus(p).isSymlink();
} catch (FileNotFoundException e) {
return false;
}
}
public void writeFile(Path path, byte b[]) throws IOException {
FSDataOutputStream out =
create(path,EnumSet.of(CreateFlag.CREATE), CreateOpts.createParent());
out.write(b);
out.close();
}
public byte[] readFile(Path path, int len) throws IOException {
DataInputStream dis = fs.open(path);
byte[] buffer = new byte[len];
IOUtils.readFully(dis, buffer, 0, len);
dis.close();
return buffer;
}
public FileStatus containsPath(Path path, FileStatus[] dirList)
throws IOException {
    for (int i = 0; i < dirList.length; i++) {
      if (path.equals(dirList[i].getPath())) {
        return dirList[i];
      }
    }
return null;
}
public FileStatus containsPath(String path, FileStatus[] dirList)
throws IOException {
return containsPath(new Path(path), dirList);
}
public void checkFileStatus(String path, fileType expectedType)
throws IOException {
FileStatus s = fs.getFileStatus(new Path(path));
Assert.assertNotNull(s);
if (expectedType == fileType.isDir) {
Assert.assertTrue(s.isDirectory());
} else if (expectedType == fileType.isFile) {
Assert.assertTrue(s.isFile());
} else if (expectedType == fileType.isSymlink) {
Assert.assertTrue(s.isSymlink());
}
Assert.assertEquals(fs.makeQualified(new Path(path)), s.getPath());
}
public void checkFileLinkStatus(String path, fileType expectedType)
throws IOException {
FileStatus s = fs.getFileLinkStatus(new Path(path));
Assert.assertNotNull(s);
if (expectedType == fileType.isDir) {
Assert.assertTrue(s.isDirectory());
} else if (expectedType == fileType.isFile) {
Assert.assertTrue(s.isFile());
} else if (expectedType == fileType.isSymlink) {
Assert.assertTrue(s.isSymlink());
}
Assert.assertEquals(fs.makeQualified(new Path(path)), s.getPath());
}
//
// FileContext wrappers
//
@Override
public Path makeQualified(Path path) {
return fs.makeQualified(path);
}
@SuppressWarnings("deprecation")
@Override
public void mkdir(Path dir, FsPermission permission, boolean createParent)
throws AccessControlException, FileAlreadyExistsException,
FileNotFoundException, ParentNotDirectoryException,
UnsupportedFileSystemException, IOException {
fs.primitiveMkdir(dir, permission, createParent);
}
@Override
public boolean delete(Path f, boolean recursive)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
return fs.delete(f, recursive);
}
@Override
public FileStatus getFileLinkStatus(Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
return fs.getFileLinkStatus(f);
}
@Override
public void createSymlink(Path target, Path link, boolean createParent)
throws AccessControlException, FileAlreadyExistsException,
FileNotFoundException, ParentNotDirectoryException,
UnsupportedFileSystemException, IOException {
fs.createSymlink(target, link, createParent);
}
@Override
public void setWorkingDirectory(Path newWDir) throws IOException {
fs.setWorkingDirectory(newWDir);
}
@Override
public Path getWorkingDirectory() {
return fs.getWorkingDirectory();
}
@Override
public FileStatus getFileStatus(Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
return fs.getFileStatus(f);
}
@Override
public FSDataOutputStream create(Path f, EnumSet<CreateFlag> createFlag,
CreateOpts... opts) throws AccessControlException,
FileAlreadyExistsException, FileNotFoundException,
ParentNotDirectoryException, UnsupportedFileSystemException, IOException {
// Need to translate the FileContext-style options into FileSystem-style
// Permissions with umask
CreateOpts.Perms permOpt = CreateOpts.getOpt(
CreateOpts.Perms.class, opts);
FsPermission umask = FsPermission.getUMask(fs.getConf());
FsPermission permission = (permOpt != null) ? permOpt.getValue()
: FsPermission.getFileDefault().applyUMask(umask);
permission = permission.applyUMask(umask);
// Overwrite
boolean overwrite = createFlag.contains(CreateFlag.OVERWRITE);
// bufferSize
int bufferSize = fs.getConf().getInt(
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
CreateOpts.BufferSize bufOpt = CreateOpts.getOpt(
CreateOpts.BufferSize.class, opts);
bufferSize = (bufOpt != null) ? bufOpt.getValue() : bufferSize;
// replication
short replication = fs.getDefaultReplication(f);
CreateOpts.ReplicationFactor repOpt =
CreateOpts.getOpt(CreateOpts.ReplicationFactor.class, opts);
replication = (repOpt != null) ? repOpt.getValue() : replication;
// blockSize
long blockSize = fs.getDefaultBlockSize(f);
CreateOpts.BlockSize blockOpt = CreateOpts.getOpt(
CreateOpts.BlockSize.class, opts);
blockSize = (blockOpt != null) ? blockOpt.getValue() : blockSize;
// Progressable
Progressable progress = null;
CreateOpts.Progress progressOpt = CreateOpts.getOpt(
CreateOpts.Progress.class, opts);
progress = (progressOpt != null) ? progressOpt.getValue() : progress;
return fs.create(f, permission, overwrite, bufferSize, replication,
blockSize, progress);
}
@Override
public FSDataInputStream open(Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
return fs.open(f);
}
@Override
public Path getLinkTarget(Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
return fs.getLinkTarget(f);
}
@Override
public boolean setReplication(final Path f, final short replication)
throws AccessControlException, FileNotFoundException,
IOException {
return fs.setReplication(f, replication);
}
@SuppressWarnings("deprecation")
@Override
public void rename(Path src, Path dst, Rename... options)
throws AccessControlException, FileAlreadyExistsException,
FileNotFoundException, ParentNotDirectoryException,
UnsupportedFileSystemException, IOException {
fs.rename(src, dst, options);
}
@Override
public BlockLocation[] getFileBlockLocations(Path f, long start, long len)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
return fs.getFileBlockLocations(f, start, len);
}
@Override
public FileChecksum getFileChecksum(Path f) throws AccessControlException,
FileNotFoundException, IOException {
return fs.getFileChecksum(f);
}
@Override
public RemoteIterator<FileStatus> listStatusIterator(Path f)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
return fs.listStatusIterator(f);
}
@Override
public void setPermission(final Path f, final FsPermission permission)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
fs.setPermission(f, permission);
}
@Override
public void setOwner(final Path f, final String username,
final String groupname) throws AccessControlException,
UnsupportedFileSystemException, FileNotFoundException,
IOException {
fs.setOwner(f, username, groupname);
}
@Override
public void setTimes(Path f, long mtime, long atime)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
fs.setTimes(f, mtime, atime);
}
@Override
public FileStatus[] listStatus(Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
return fs.listStatus(f);
}
@Override
public FileStatus[] globStatus(Path pathPattern, PathFilter filter)
throws IOException {
return fs.globStatus(pathPattern, filter);
}
}
| 12,609 | 32.359788 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSWrapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.AccessControlException;
/**
* Abstraction of filesystem operations that is essentially an interface
* extracted from {@link FileContext}.
*/
public interface FSWrapper {
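  // Implementations typically just delegate each call to a wrapped
  // FileContext or FileSystem; a minimal sketch (the field name "fc" is
  // hypothetical):
  //
  //   public Path getWorkingDirectory() {
  //     return fc.getWorkingDirectory();
  //   }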
abstract public void setWorkingDirectory(final Path newWDir)
throws IOException;
abstract public Path getWorkingDirectory();
abstract public Path makeQualified(final Path path);
abstract public FSDataOutputStream create(final Path f,
final EnumSet<CreateFlag> createFlag, Options.CreateOpts... opts)
throws AccessControlException, FileAlreadyExistsException,
FileNotFoundException, ParentNotDirectoryException,
UnsupportedFileSystemException, IOException;
abstract public void mkdir(final Path dir, final FsPermission permission,
final boolean createParent) throws AccessControlException,
FileAlreadyExistsException, FileNotFoundException,
ParentNotDirectoryException, UnsupportedFileSystemException, IOException;
abstract public boolean delete(final Path f, final boolean recursive)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException;
abstract public FSDataInputStream open(final Path f)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException;
abstract public boolean setReplication(final Path f, final short replication)
throws AccessControlException, FileNotFoundException,
IOException;
abstract public void rename(final Path src, final Path dst,
final Options.Rename... options) throws AccessControlException,
FileAlreadyExistsException, FileNotFoundException,
ParentNotDirectoryException, UnsupportedFileSystemException, IOException;
abstract public void setPermission(final Path f, final FsPermission permission)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException;
abstract public void setOwner(final Path f, final String username,
final String groupname) throws AccessControlException,
UnsupportedFileSystemException, FileNotFoundException,
IOException;
abstract public void setTimes(final Path f, final long mtime, final long atime)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException;
abstract public FileChecksum getFileChecksum(final Path f)
throws AccessControlException, FileNotFoundException, IOException;
abstract public FileStatus getFileStatus(final Path f)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException;
abstract public FileStatus getFileLinkStatus(final Path f)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException;
abstract public Path getLinkTarget(final Path f)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException;
abstract public BlockLocation[] getFileBlockLocations(final Path f,
final long start, final long len) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException;
abstract public void createSymlink(final Path target, final Path link,
final boolean createParent) throws AccessControlException,
FileAlreadyExistsException, FileNotFoundException,
ParentNotDirectoryException, UnsupportedFileSystemException, IOException;
abstract public RemoteIterator<FileStatus> listStatusIterator(final Path f)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException;
abstract public FileStatus[] listStatus(final Path f)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException;
abstract public FileStatus[] globStatus(Path pathPattern, PathFilter filter)
throws IOException;
}
| 4,949 | 41.672414 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextURIBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.*;
import java.util.ArrayList;
import java.util.regex.Pattern;
import org.junit.Assert;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Shell;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.apache.hadoop.fs.FileContextTestHelper.*;
/**
* <p>
* A collection of tests for the {@link FileContext} to test path names passed
 * as URIs. This test should be used for testing an instance of FileContext
 * that has been initialized to a specific default FileSystem such as a
 * LocalFileSystem, HDFS, S3, etc., and where the path names passed are URIs
 * in a different FileSystem.
* </p>
*
* <p>
 * To test a given {@link FileSystem} implementation create a subclass of this
 * test and override {@link #setUp()} to initialize the <code>fc1</code> and
 * <code>fc2</code> {@link FileContext} instance variables (see the sketch
 * below).
 *
 * The tests perform operations on fc1 that use a URI in fc2.
* </p>
*/
public abstract class FileContextURIBase {
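  // A minimal sketch of such a subclass (the class name is hypothetical;
  // both contexts are bound to the local file system for illustration):
  //
  //   public class TestLocalFsFileContextURI extends FileContextURIBase {
  //     @Override
  //     @Before
  //     public void setUp() throws Exception {
  //       fc1 = FileContext.getLocalFSFileContext();
  //       fc2 = FileContext.getLocalFSFileContext();
  //     }
  //   }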
private static final String basePath = System.getProperty("test.build.data",
"build/test/data") + "/testContextURI";
private static final Path BASE = new Path(basePath);
// Matches anything containing <, >, :, ", |, ?, *, or anything that ends with
// space or dot.
private static final Pattern WIN_INVALID_FILE_NAME_PATTERN = Pattern.compile(
"^(.*?[<>\\:\"\\|\\?\\*].*?)|(.*?[ \\.])$");
protected FileContext fc1;
protected FileContext fc2;
//Helper method to make path qualified
protected Path qualifiedPath(String path, FileContext fc) {
return fc.makeQualified(new Path(BASE, path));
}
@Before
public void setUp() throws Exception { }
@After
public void tearDown() throws Exception {
// Clean up after test completion
    // No need to clean fc1 as fc1 and fc2 point to the same location
fc2.delete(BASE, true);
}
@Test
public void testCreateFile() throws IOException {
String fileNames[] = {
"testFile", "test File",
"test*File", "test#File",
"test1234", "1234Test",
"test)File", "test_File",
"()&^%$#@!~_+}{><?",
" ", "^ " };
for (String f : fileNames) {
if (!isTestableFileNameOnPlatform(f)) {
continue;
}
// Create a file on fc2's file system using fc1
Path testPath = qualifiedPath(f, fc2);
// Ensure file does not exist
Assert.assertFalse(exists(fc2, testPath));
// Now create file
createFile(fc1, testPath);
// Ensure fc2 has the created file
Assert.assertTrue(exists(fc2, testPath));
}
}
@Test
public void testCreateFileWithNullName() throws IOException {
String fileName = null;
try {
Path testPath = qualifiedPath(fileName, fc2);
// Ensure file does not exist
Assert.assertFalse(exists(fc2, testPath));
// Create a file on fc2's file system using fc1
createFile(fc1, testPath);
Assert.fail("Create file with null name should throw IllegalArgumentException.");
} catch (IllegalArgumentException e) {
// expected
}
}
@Test
public void testCreateExistingFile() throws IOException {
String fileName = "testFile";
Path testPath = qualifiedPath(fileName, fc2);
// Ensure file does not exist
Assert.assertFalse(exists(fc2, testPath));
// Create a file on fc2's file system using fc1
createFile(fc1, testPath);
// Create same file with fc1
try {
createFile(fc2, testPath);
Assert.fail("Create existing file should throw an IOException.");
} catch (IOException e) {
// expected
}
// Ensure fc2 has the created file
Assert.assertTrue(exists(fc2, testPath));
}
@Test
public void testCreateFileInNonExistingDirectory() throws IOException {
String fileName = "testDir/testFile";
Path testPath = qualifiedPath(fileName, fc2);
// Ensure file does not exist
Assert.assertFalse(exists(fc2, testPath));
// Create a file on fc2's file system using fc1
createFile(fc1, testPath);
// Ensure using fc2 that file is created
Assert.assertTrue(isDir(fc2, testPath.getParent()));
Assert.assertEquals("testDir", testPath.getParent().getName());
Assert.assertTrue(exists(fc2, testPath));
}
@Test
public void testCreateDirectory() throws IOException {
Path path = qualifiedPath("test/hadoop", fc2);
Path falsePath = qualifiedPath("path/doesnot.exist", fc2);
Path subDirPath = qualifiedPath("dir0", fc2);
// Ensure that testPath does not exist in fc1
Assert.assertFalse(exists(fc1, path));
Assert.assertFalse(isFile(fc1, path));
Assert.assertFalse(isDir(fc1, path));
// Create a directory on fc2's file system using fc1
fc1.mkdir(path, FsPermission.getDefault(), true);
// Ensure fc2 has directory
Assert.assertTrue(isDir(fc2, path));
Assert.assertTrue(exists(fc2, path));
Assert.assertFalse(isFile(fc2, path));
    // Test creating the same dir twice (HDFS mkdir behaves like mkdir -p)
fc1.mkdir(subDirPath, FsPermission.getDefault(), true);
// This should not throw exception
fc1.mkdir(subDirPath, FsPermission.getDefault(), true);
// Create Sub Dirs
fc1.mkdir(subDirPath, FsPermission.getDefault(), true);
// Check parent dir
Path parentDir = path.getParent();
Assert.assertTrue(exists(fc2, parentDir));
Assert.assertFalse(isFile(fc2, parentDir));
    // Check grandparent dir
Path grandparentDir = parentDir.getParent();
Assert.assertTrue(exists(fc2, grandparentDir));
Assert.assertFalse(isFile(fc2, grandparentDir));
// Negative test cases
Assert.assertFalse(exists(fc2, falsePath));
Assert.assertFalse(isDir(fc2, falsePath));
// TestCase - Create multiple directories
String dirNames[] = {
"createTest/testDir", "createTest/test Dir",
"deleteTest/test*Dir", "deleteTest/test#Dir",
"deleteTest/test1234", "deleteTest/test_DIr",
"deleteTest/1234Test", "deleteTest/test)Dir",
"deleteTest/()&^%$#@!~_+}{><?", " ", "^ " };
for (String f : dirNames) {
if (!isTestableFileNameOnPlatform(f)) {
continue;
}
// Create a file on fc2's file system using fc1
Path testPath = qualifiedPath(f, fc2);
// Ensure file does not exist
Assert.assertFalse(exists(fc2, testPath));
// Now create directory
fc1.mkdir(testPath, FsPermission.getDefault(), true);
// Ensure fc2 has the created directory
Assert.assertTrue(exists(fc2, testPath));
Assert.assertTrue(isDir(fc2, testPath));
}
}
@Test
public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
Path testDir = qualifiedPath("test/hadoop", fc2);
Assert.assertFalse(exists(fc2, testDir));
fc2.mkdir(testDir, FsPermission.getDefault(), true);
Assert.assertTrue(exists(fc2, testDir));
// Create file on fc1 using fc2 context
createFile(fc1, qualifiedPath("test/hadoop/file", fc2));
Path testSubDir = qualifiedPath("test/hadoop/file/subdir", fc2);
try {
fc1.mkdir(testSubDir, FsPermission.getDefault(), true);
Assert.fail("Should throw IOException.");
} catch (IOException e) {
// expected
}
Assert.assertFalse(exists(fc1, testSubDir));
Path testDeepSubDir = qualifiedPath("test/hadoop/file/deep/sub/dir", fc1);
try {
fc2.mkdir(testDeepSubDir, FsPermission.getDefault(), true);
Assert.fail("Should throw IOException.");
} catch (IOException e) {
// expected
}
Assert.assertFalse(exists(fc1, testDeepSubDir));
}
@Test
public void testIsDirectory() throws IOException {
String dirName = "dirTest";
    String invalidDir = "nonExistentDir";
String rootDir = "/";
Path existingPath = qualifiedPath(dirName, fc2);
Path nonExistingPath = qualifiedPath(invalidDir, fc2);
Path pathToRootDir = qualifiedPath(rootDir, fc2);
// Create a directory on fc2's file system using fc1
fc1.mkdir(existingPath, FsPermission.getDefault(), true);
// Ensure fc2 has directory
Assert.assertTrue(isDir(fc2, existingPath));
Assert.assertTrue(isDir(fc2, pathToRootDir));
// Negative test case
Assert.assertFalse(isDir(fc2, nonExistingPath));
}
@Test
public void testDeleteFile() throws IOException {
Path testPath = qualifiedPath("testFile", fc2);
// Ensure file does not exist
Assert.assertFalse(exists(fc2, testPath));
// First create a file on file system using fc1
createFile(fc1, testPath);
// Ensure file exist
Assert.assertTrue(exists(fc2, testPath));
// Delete file using fc2
fc2.delete(testPath, false);
// Ensure fc2 does not have deleted file
Assert.assertFalse(exists(fc2, testPath));
}
@Test
public void testDeleteNonExistingFile() throws IOException {
String testFileName = "testFile";
Path testPath = qualifiedPath(testFileName, fc2);
// TestCase1 : Test delete on file never existed
// Ensure file does not exist
Assert.assertFalse(exists(fc2, testPath));
// Delete on non existing file should return false
Assert.assertFalse(fc2.delete(testPath, false));
// TestCase2 : Create , Delete , Delete file
// Create a file on fc2's file system using fc1
createFile(fc1, testPath);
// Ensure file exist
Assert.assertTrue(exists(fc2, testPath));
// Delete test file, deleting existing file should return true
Assert.assertTrue(fc2.delete(testPath, false));
// Ensure file does not exist
Assert.assertFalse(exists(fc2, testPath));
// Delete on non existing file should return false
Assert.assertFalse(fc2.delete(testPath, false));
}
@Test
public void testDeleteNonExistingFileInDir() throws IOException {
String testFileInDir = "testDir/testDir/TestFile";
Path testPath = qualifiedPath(testFileInDir, fc2);
// TestCase1 : Test delete on file never existed
// Ensure file does not exist
Assert.assertFalse(exists(fc2, testPath));
// Delete on non existing file should return false
Assert.assertFalse(fc2.delete(testPath, false));
// TestCase2 : Create , Delete , Delete file
// Create a file on fc2's file system using fc1
createFile(fc1, testPath);
// Ensure file exist
Assert.assertTrue(exists(fc2, testPath));
// Delete test file, deleting existing file should return true
Assert.assertTrue(fc2.delete(testPath, false));
// Ensure file does not exist
Assert.assertFalse(exists(fc2, testPath));
// Delete on non existing file should return false
Assert.assertFalse(fc2.delete(testPath, false));
}
@Test
public void testDeleteDirectory() throws IOException {
String dirName = "dirTest";
Path testDirPath = qualifiedPath(dirName, fc2);
// Ensure directory does not exist
Assert.assertFalse(exists(fc2, testDirPath));
// Create a directory on fc2's file system using fc1
fc1.mkdir(testDirPath, FsPermission.getDefault(), true);
// Ensure dir is created
Assert.assertTrue(exists(fc2, testDirPath));
Assert.assertTrue(isDir(fc2, testDirPath));
fc2.delete(testDirPath, true);
// Ensure that directory is deleted
Assert.assertFalse(isDir(fc2, testDirPath));
// TestCase - Create and delete multiple directories
String dirNames[] = {
"deleteTest/testDir", "deleteTest/test Dir",
"deleteTest/test*Dir", "deleteTest/test#Dir",
"deleteTest/test1234", "deleteTest/1234Test",
"deleteTest/test)Dir", "deleteTest/test_DIr",
"deleteTest/()&^%$#@!~_+}{><?", " ", "^ " };
for (String f : dirNames) {
if (!isTestableFileNameOnPlatform(f)) {
continue;
}
// Create a file on fc2's file system using fc1
Path testPath = qualifiedPath(f, fc2);
// Ensure file does not exist
Assert.assertFalse(exists(fc2, testPath));
// Now create directory
fc1.mkdir(testPath, FsPermission.getDefault(), true);
// Ensure fc2 has the created directory
Assert.assertTrue(exists(fc2, testPath));
Assert.assertTrue(isDir(fc2, testPath));
// Delete dir
Assert.assertTrue(fc2.delete(testPath, true));
// verify if directory is deleted
Assert.assertFalse(exists(fc2, testPath));
Assert.assertFalse(isDir(fc2, testPath));
}
}
@Test
public void testDeleteNonExistingDirectory() throws IOException {
String testDirName = "testFile";
Path testPath = qualifiedPath(testDirName, fc2);
// TestCase1 : Test delete on directory never existed
// Ensure directory does not exist
Assert.assertFalse(exists(fc2, testPath));
// Delete on non existing directory should return false
Assert.assertFalse(fc2.delete(testPath, false));
// TestCase2 : Create dir, Delete dir, Delete dir
// Create a file on fc2's file system using fc1
fc1.mkdir(testPath, FsPermission.getDefault(), true);
// Ensure dir exist
Assert.assertTrue(exists(fc2, testPath));
// Delete test file, deleting existing file should return true
Assert.assertTrue(fc2.delete(testPath, false));
// Ensure file does not exist
Assert.assertFalse(exists(fc2, testPath));
// Delete on non existing file should return false
Assert.assertFalse(fc2.delete(testPath, false));
}
@Test
public void testModificationTime() throws IOException {
String testFile = "file1";
long fc2ModificationTime, fc1ModificationTime;
Path testPath = qualifiedPath(testFile, fc2);
// Create a file on fc2's file system using fc1
createFile(fc1, testPath);
// Get modification time using fc2 and fc1
fc1ModificationTime = fc1.getFileStatus(testPath).getModificationTime();
fc2ModificationTime = fc2.getFileStatus(testPath).getModificationTime();
// Ensure fc1 and fc2 reports same modification time
Assert.assertEquals(fc1ModificationTime, fc2ModificationTime);
}
@Test
public void testFileStatus() throws IOException {
String fileName = "file1";
Path path2 = fc2.makeQualified(new Path(BASE, fileName));
// Create a file on fc2's file system using fc1
createFile(fc1, path2);
FsStatus fc2Status = fc2.getFsStatus(path2);
    // FsStatus: used, free and capacity are non-negative longs
Assert.assertNotNull(fc2Status);
Assert.assertTrue(fc2Status.getCapacity() > 0);
Assert.assertTrue(fc2Status.getRemaining() > 0);
Assert.assertTrue(fc2Status.getUsed() > 0);
}
@Test
public void testGetFileStatusThrowsExceptionForNonExistentFile()
throws Exception {
String testFile = "test/hadoop/fileDoesNotExist";
Path testPath = qualifiedPath(testFile, fc2);
try {
fc1.getFileStatus(testPath);
Assert.fail("Should throw FileNotFoundException");
} catch (FileNotFoundException e) {
// expected
}
}
@Test
public void testListStatusThrowsExceptionForNonExistentFile()
throws Exception {
String testFile = "test/hadoop/file";
Path testPath = qualifiedPath(testFile, fc2);
try {
fc1.listStatus(testPath);
Assert.fail("Should throw FileNotFoundException");
} catch (FileNotFoundException fnfe) {
// expected
}
}
@Test
public void testListStatus() throws Exception {
final String hPrefix = "test/hadoop";
final String[] dirs = {
hPrefix + "/a",
hPrefix + "/b",
hPrefix + "/c",
hPrefix + "/1",
hPrefix + "/#@#@",
hPrefix + "/&*#$#$@234"};
ArrayList<Path> testDirs = new ArrayList<Path>();
for (String d : dirs) {
if (!isTestableFileNameOnPlatform(d)) {
continue;
}
testDirs.add(qualifiedPath(d, fc2));
}
Assert.assertFalse(exists(fc1, testDirs.get(0)));
for (Path path : testDirs) {
fc1.mkdir(path, FsPermission.getDefault(), true);
}
// test listStatus that returns an array of FileStatus
FileStatus[] paths = fc1.util().listStatus(qualifiedPath("test", fc1));
Assert.assertEquals(1, paths.length);
Assert.assertEquals(qualifiedPath(hPrefix, fc1), paths[0].getPath());
paths = fc1.util().listStatus(qualifiedPath(hPrefix, fc1));
Assert.assertEquals(testDirs.size(), paths.length);
for (int i = 0; i < testDirs.size(); i++) {
boolean found = false;
for (int j = 0; j < paths.length; j++) {
if (qualifiedPath(testDirs.get(i).toString(), fc1).equals(
paths[j].getPath())) {
found = true;
}
}
Assert.assertTrue(testDirs.get(i) + " not found", found);
}
paths = fc1.util().listStatus(qualifiedPath(dirs[0], fc1));
Assert.assertEquals(0, paths.length);
// test listStatus that returns an iterator of FileStatus
RemoteIterator<FileStatus> pathsItor =
fc1.listStatus(qualifiedPath("test", fc1));
Assert.assertEquals(qualifiedPath(hPrefix, fc1), pathsItor.next().getPath());
Assert.assertFalse(pathsItor.hasNext());
pathsItor = fc1.listStatus(qualifiedPath(hPrefix, fc1));
int dirLen = 0;
for (; pathsItor.hasNext(); dirLen++) {
boolean found = false;
FileStatus stat = pathsItor.next();
for (int j = 0; j < dirs.length; j++) {
if (qualifiedPath(dirs[j],fc1).equals(stat.getPath())) {
found = true;
break;
}
}
Assert.assertTrue(stat.getPath() + " not found", found);
}
Assert.assertEquals(testDirs.size(), dirLen);
pathsItor = fc1.listStatus(qualifiedPath(dirs[0], fc1));
Assert.assertFalse(pathsItor.hasNext());
}
/**
* Returns true if the argument is a file name that is testable on the platform
* currently running the test. This is intended for use by tests so that they
* can skip checking file names that aren't supported by the underlying
* platform. The current implementation specifically checks for patterns that
* are not valid file names on Windows when the tests are running on Windows.
*
* @param fileName String file name to check
* @return boolean true if the argument is valid as a file name
*/
private static boolean isTestableFileNameOnPlatform(String fileName) {
boolean valid = true;
if (Shell.WINDOWS) {
// Disallow reserved characters: <, >, :, ", |, ?, *.
// Disallow trailing space or period.
// See http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
valid = !WIN_INVALID_FILE_NAME_PATTERN.matcher(fileName).matches();
}
return valid;
}
}
| 19,420 | 31.530988 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.shell.FsCommand;
import org.apache.hadoop.fs.shell.PathData;
import org.apache.hadoop.io.IOUtils;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import org.apache.hadoop.util.Shell;
import org.junit.BeforeClass;
import org.junit.Test;
/**
 * This test validates that chmod, chown and chgrp return correct exit codes.
*
*/
public class TestFsShellReturnCode {
private static final Log LOG = LogFactory
.getLog("org.apache.hadoop.fs.TestFsShellReturnCode");
private static final Configuration conf = new Configuration();
private static FileSystem fileSys;
private static FsShell fsShell;
@BeforeClass
public static void setup() throws IOException {
conf.setClass("fs.file.impl", LocalFileSystemExtn.class, LocalFileSystem.class);
fileSys = FileSystem.get(conf);
fsShell = new FsShell(conf);
}
private static String TEST_ROOT_DIR = System.getProperty("test.build.data",
"build/test/data/testCHReturnCode");
static void writeFile(FileSystem fs, Path name) throws Exception {
FSDataOutputStream stm = fs.create(name);
stm.writeBytes("42\n");
stm.close();
}
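  /**
   * Runs -chown (when owner is non-null) or -chgrp through the shared
   * FsShell, asserts the expected exit code, and verifies via globStatus
   * that owner/group were updated where expected. The stubbed file system
   * below reports updated values with a "STUB-" prefix, which is how a
   * successful setOwner call becomes visible.
   */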
private void change(int exit, String owner, String group, String...files)
throws Exception {
FileStatus[][] oldStats = new FileStatus[files.length][];
for (int i=0; i < files.length; i++) {
oldStats[i] = fileSys.globStatus(new Path(files[i]));
}
List<String>argv = new LinkedList<String>();
if (owner != null) {
argv.add("-chown");
String chown = owner;
if (group != null) {
chown += ":" + group;
if (group.isEmpty()) group = null; // avoid testing for it later
}
argv.add(chown);
} else {
argv.add("-chgrp");
argv.add(group);
}
Collections.addAll(argv, files);
assertEquals(exit, fsShell.run(argv.toArray(new String[0])));
for (int i=0; i < files.length; i++) {
FileStatus[] stats = fileSys.globStatus(new Path(files[i]));
if (stats != null) {
for (int j=0; j < stats.length; j++) {
assertEquals("check owner of " + files[i],
((owner != null) ? "STUB-"+owner : oldStats[i][j].getOwner()),
stats[j].getOwner()
);
assertEquals("check group of " + files[i],
((group != null) ? "STUB-"+group : oldStats[i][j].getGroup()),
stats[j].getGroup()
);
}
}
}
}
/**
   * Test chmod:
   * 1. Create and write a file on the FS.
   * 2. Verify that the exit code for chmod on an existing file is 0.
   * 3. Verify that the exit code for chmod on a non-existing file is 1.
   * 4. Verify that the exit code for chmod with glob input on non-existing files is 1.
   * 5. Verify that the exit code for chmod with glob input on existing files is 0.
*
* @throws Exception
*/
@Test (timeout = 30000)
public void testChmod() throws Exception {
Path p1 = new Path(TEST_ROOT_DIR, "testChmod/fileExists");
final String f1 = p1.toUri().getPath();
final String f2 = new Path(TEST_ROOT_DIR, "testChmod/fileDoesNotExist")
.toUri().getPath();
final String f3 = new Path(TEST_ROOT_DIR, "testChmod/nonExistingfiles*")
.toUri().getPath();
final Path p4 = new Path(TEST_ROOT_DIR, "testChmod/file1");
final Path p5 = new Path(TEST_ROOT_DIR, "testChmod/file2");
final Path p6 = new Path(TEST_ROOT_DIR, "testChmod/file3");
final String f7 = new Path(TEST_ROOT_DIR, "testChmod/file*").toUri()
.getPath();
// create and write test file
writeFile(fileSys, p1);
assertTrue(fileSys.exists(p1));
    // Test 1: exit code for chmod on existing path is 0
String argv[] = { "-chmod", "777", f1 };
assertEquals(0, fsShell.run(argv));
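    // For reference, a roughly equivalent shell invocation (sketch) is:
    //   hadoop fs -chmod 777 <path>   # exit code 0 when the path exists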
// Test 2: exit code for chmod on non-existing path is 1
String argv2[] = { "-chmod", "777", f2 };
assertEquals(1, fsShell.run(argv2));
// Test 3: exit code for chmod on non-existing path with globbed input is 1
String argv3[] = { "-chmod", "777", f3 };
assertEquals(1, fsShell.run(argv3));
// create required files
writeFile(fileSys, p4);
assertTrue(fileSys.exists(p4));
writeFile(fileSys, p5);
assertTrue(fileSys.exists(p5));
writeFile(fileSys, p6);
assertTrue(fileSys.exists(p6));
// Test 4: exit code for chmod on existing path with globbed input is 0
String argv4[] = { "-chmod", "777", f7 };
assertEquals(0, fsShell.run(argv4));
}
/**
   * Test chown:
   * 1. Create and write a file on the FS.
   * 2. Verify that the exit code for chown on an existing file is 0.
   * 3. Verify that the exit code for chown on a non-existing file is 1.
   * 4. Verify that the exit code for chown with glob input on non-existing files is 1.
   * 5. Verify that the exit code for chown with glob input on existing files is 0.
*
* @throws Exception
*/
@Test (timeout = 30000)
public void testChown() throws Exception {
Path p1 = new Path(TEST_ROOT_DIR, "testChown/fileExists");
final String f1 = p1.toUri().getPath();
final String f2 = new Path(TEST_ROOT_DIR, "testChown/fileDoesNotExist")
.toUri().getPath();
final String f3 = new Path(TEST_ROOT_DIR, "testChown/nonExistingfiles*")
.toUri().getPath();
final Path p4 = new Path(TEST_ROOT_DIR, "testChown/file1");
final Path p5 = new Path(TEST_ROOT_DIR, "testChown/file2");
final Path p6 = new Path(TEST_ROOT_DIR, "testChown/file3");
final String f7 = new Path(TEST_ROOT_DIR, "testChown/file*").toUri()
.getPath();
// create and write test file
writeFile(fileSys, p1);
assertTrue(fileSys.exists(p1));
// Test 1: exit code for chown on existing file is 0
change(0, "admin", null, f1);
// Test 2: exit code for chown on non-existing path is 1
change(1, "admin", null, f2);
// Test 3: exit code for chown on non-existing path with globbed input is 1
change(1, "admin", null, f3);
// create required files
writeFile(fileSys, p4);
assertTrue(fileSys.exists(p4));
writeFile(fileSys, p5);
assertTrue(fileSys.exists(p5));
writeFile(fileSys, p6);
assertTrue(fileSys.exists(p6));
// Test 4: exit code for chown on existing path with globbed input is 0
change(0, "admin", null, f7);
//Test 5: test for setOwner invocation on FS from command handler.
change(0, "admin", "Test", f1);
change(0, "admin", "", f1);
}
/**
   * Test chgrp:
   * 1. Create and write a file on the FS.
   * 2. Verify that the exit code for chgrp on an existing file is 0.
   * 3. Verify that the exit code for chgrp on a non-existing file is 1.
   * 4. Verify that the exit code for chgrp with glob input on non-existing files is 1.
   * 5. Verify that the exit code for chgrp with glob input on existing files is 0.
*
* @throws Exception
*/
@Test (timeout = 30000)
public void testChgrp() throws Exception {
Path p1 = new Path(TEST_ROOT_DIR, "testChgrp/fileExists");
final String f1 = p1.toUri().getPath();
final String f2 = new Path(TEST_ROOT_DIR, "testChgrp/fileDoesNotExist")
.toUri().getPath();
final String f3 = new Path(TEST_ROOT_DIR, "testChgrp/nonExistingfiles*")
.toUri().getPath();
final Path p4 = new Path(TEST_ROOT_DIR, "testChgrp/file1");
final Path p5 = new Path(TEST_ROOT_DIR, "testChgrp/file2");
final Path p6 = new Path(TEST_ROOT_DIR, "testChgrp/file3");
final String f7 = new Path(TEST_ROOT_DIR, "testChgrp/file*").toUri()
.getPath();
// create and write test file
writeFile(fileSys, p1);
assertTrue(fileSys.exists(p1));
// Test 1: exit code for chgrp on existing file is 0
change(0, null, "admin", f1);
// Test 2: exit code for chgrp on non existing path is 1
change(1, null, "admin", f2);
change(1, null, "admin", f2, f1); // exit code used to be for last item
// Test 3: exit code for chgrp on non-existing path with globbed input is 1
change(1, null, "admin", f3);
change(1, null, "admin", f3, f1);
// create required files
writeFile(fileSys, p4);
assertTrue(fileSys.exists(p4));
writeFile(fileSys, p5);
assertTrue(fileSys.exists(p5));
writeFile(fileSys, p6);
assertTrue(fileSys.exists(p6));
// Test 4: exit code for chgrp on existing path with globbed input is 0
change(0, null, "admin", f7);
change(1, null, "admin", f2, f7);
}
@Test (timeout = 30000)
public void testGetWithInvalidSourcePathShouldNotDisplayNullInConsole()
throws Exception {
Configuration conf = new Configuration();
FsShell shell = new FsShell();
shell.setConf(conf);
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final PrintStream out = new PrintStream(bytes);
final PrintStream oldErr = System.err;
System.setErr(out);
final String results;
try {
Path tdir = new Path(TEST_ROOT_DIR, "notNullCopy");
fileSys.delete(tdir, true);
fileSys.mkdirs(tdir);
String[] args = new String[3];
args[0] = "-get";
args[1] = new Path(tdir.toUri().getPath(), "/invalidSrc").toString();
args[2] = new Path(tdir.toUri().getPath(), "/invalidDst").toString();
assertTrue("file exists", !fileSys.exists(new Path(args[1])));
assertTrue("file exists", !fileSys.exists(new Path(args[2])));
int run = shell.run(args);
results = bytes.toString();
assertEquals("Return code should be 1", 1, run);
assertTrue(" Null is coming when source path is invalid. ",!results.contains("get: null"));
assertTrue(" Not displaying the intended message ",results.contains("get: `"+args[1]+"': No such file or directory"));
} finally {
IOUtils.closeStream(out);
System.setErr(oldErr);
}
}
@Test (timeout = 30000)
public void testRmWithNonexistentGlob() throws Exception {
Configuration conf = new Configuration();
FsShell shell = new FsShell();
shell.setConf(conf);
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final PrintStream err = new PrintStream(bytes);
final PrintStream oldErr = System.err;
System.setErr(err);
final String results;
try {
int exit = shell.run(new String[]{"-rm", "nomatch*"});
assertEquals(1, exit);
results = bytes.toString();
assertTrue(results.contains("rm: `nomatch*': No such file or directory"));
} finally {
IOUtils.closeStream(err);
System.setErr(oldErr);
}
}
@Test (timeout = 30000)
public void testRmForceWithNonexistentGlob() throws Exception {
Configuration conf = new Configuration();
FsShell shell = new FsShell();
shell.setConf(conf);
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final PrintStream err = new PrintStream(bytes);
final PrintStream oldErr = System.err;
System.setErr(err);
try {
int exit = shell.run(new String[]{"-rm", "-f", "nomatch*"});
assertEquals(0, exit);
assertTrue(bytes.toString().isEmpty());
} finally {
IOUtils.closeStream(err);
System.setErr(oldErr);
}
}
@Test (timeout = 30000)
public void testInvalidDefaultFS() throws Exception {
// if default fs doesn't exist or is invalid, but the path provided in
// arguments is valid - fsshell should work
FsShell shell = new FsShell();
Configuration conf = new Configuration();
conf.set(FS_DEFAULT_NAME_KEY, "hhhh://doesnotexist/");
shell.setConf(conf);
String [] args = new String[2];
args[0] = "-ls";
args[1] = "file:///"; // this is valid, so command should run
int res = shell.run(args);
System.out.println("res =" + res);
shell.setConf(conf);
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final PrintStream out = new PrintStream(bytes);
final PrintStream oldErr = System.err;
System.setErr(out);
final String results;
try {
int run = shell.run(args);
results = bytes.toString();
LOG.info("result=" + results);
assertTrue("Return code should be 0", run == 0);
} finally {
IOUtils.closeStream(out);
System.setErr(oldErr);
}
}
/**
   * Faked Chown class for {@link #testChownUserAndGroupValidity()}.
*
* The test only covers argument parsing, so override to skip processing.
*/
private static class FakeChown extends FsShellPermissions.Chown {
public static String NAME = "chown";
@Override
protected void processArgument(PathData item) {
}
}
/**
* Tests combinations of valid and invalid user and group arguments to chown.
*/
@Test
public void testChownUserAndGroupValidity() {
testChownUserAndGroupValidity(true);
testChownUserAndGroupValidity(false);
}
private void testChownUserAndGroupValidity(boolean enableWarning) {
Configuration conf = new Configuration();
conf.setBoolean(
HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY, enableWarning);
FsCommand chown = new FakeChown();
chown.setConf(conf);
// The following are valid (no exception expected).
chown.run("user", "/path");
chown.run("user:group", "/path");
chown.run(":group", "/path");
// The following are valid only on Windows.
assertValidArgumentsOnWindows(chown, "User With Spaces", "/path");
assertValidArgumentsOnWindows(chown, "User With Spaces:group", "/path");
assertValidArgumentsOnWindows(chown, "User With Spaces:Group With Spaces",
"/path");
assertValidArgumentsOnWindows(chown, "user:Group With Spaces", "/path");
assertValidArgumentsOnWindows(chown, ":Group With Spaces", "/path");
// The following are invalid (exception expected).
assertIllegalArguments(chown, "us!er", "/path");
assertIllegalArguments(chown, "us^er", "/path");
assertIllegalArguments(chown, "user:gr#oup", "/path");
assertIllegalArguments(chown, "user:gr%oup", "/path");
assertIllegalArguments(chown, ":gr#oup", "/path");
assertIllegalArguments(chown, ":gr%oup", "/path");
}
/**
   * Faked Chgrp class for {@link #testChgrpGroupValidity()}.
* The test only covers argument parsing, so override to skip processing.
*/
private static class FakeChgrp extends FsShellPermissions.Chgrp {
public static String NAME = "chgrp";
@Override
protected void processArgument(PathData item) {
}
}
/**
* Tests valid and invalid group arguments to chgrp.
*/
@Test
public void testChgrpGroupValidity() {
testChgrpGroupValidity(true);
testChgrpGroupValidity(false);
}
private void testChgrpGroupValidity(boolean enableWarning) {
Configuration conf = new Configuration();
conf.setBoolean(
HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY, enableWarning);
FsShellPermissions.Chgrp chgrp = new FakeChgrp();
chgrp.setConf(conf);
// The following are valid (no exception expected).
chgrp.run("group", "/path");
// The following are valid only on Windows.
assertValidArgumentsOnWindows(chgrp, "Group With Spaces", "/path");
// The following are invalid (exception expected).
assertIllegalArguments(chgrp, ":gr#oup", "/path");
assertIllegalArguments(chgrp, ":gr%oup", "/path");
}
static class LocalFileSystemExtn extends LocalFileSystem {
public LocalFileSystemExtn() {
super(new RawLocalFileSystemExtn());
}
}
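  /**
   * RawLocalFileSystem variant that records setOwner() calls in static maps
   * instead of changing anything on disk. getFileStatus() reports recorded
   * values with a "STUB-" prefix and untouched ones with a "REAL-" prefix,
   * so tests can tell whether setOwner was actually invoked.
   */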
static class RawLocalFileSystemExtn extends RawLocalFileSystem {
protected static HashMap<String,String> owners = new HashMap<String,String>();
protected static HashMap<String,String> groups = new HashMap<String,String>();
@Override
public FSDataOutputStream create(Path p) throws IOException {
//owners.remove(p);
//groups.remove(p);
return super.create(p);
}
@Override
public void setOwner(Path p, String username, String groupname)
throws IOException {
String f = makeQualified(p).toString();
if (username != null) {
owners.put(f, username);
}
if (groupname != null) {
groups.put(f, groupname);
}
}
@Override
public FileStatus getFileStatus(Path p) throws IOException {
String f = makeQualified(p).toString();
FileStatus stat = super.getFileStatus(p);
stat.getPermission();
if (owners.containsKey(f)) {
stat.setOwner("STUB-"+owners.get(f));
} else {
stat.setOwner("REAL-"+stat.getOwner());
}
if (groups.containsKey(f)) {
stat.setGroup("STUB-"+groups.get(f));
} else {
stat.setGroup("REAL-"+stat.getGroup());
}
return stat;
}
}
/**
* Asserts that for the given command, the given arguments are considered
* invalid. The expectation is that the command will throw
* IllegalArgumentException.
*
* @param cmd FsCommand to check
* @param args String... arguments to check
*/
private static void assertIllegalArguments(FsCommand cmd, String... args) {
try {
cmd.run(args);
fail("Expected IllegalArgumentException from args: " +
Arrays.toString(args));
} catch (IllegalArgumentException e) {
}
}
/**
* Asserts that for the given command, the given arguments are considered valid
* on Windows, but invalid elsewhere.
*
* @param cmd FsCommand to check
* @param args String... arguments to check
*/
private static void assertValidArgumentsOnWindows(FsCommand cmd,
String... args) {
if (Shell.WINDOWS) {
cmd.run(args);
} else {
assertIllegalArguments(cmd, args);
}
}
}
| 18,967 | 33.115108 | 124 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.apache.hadoop.fs.CommonConfigurationKeys.*;
import static org.apache.hadoop.fs.FileSystemTestHelper.*;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.net.URI;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.HashSet;
import java.util.Set;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Time;
/**
* This class tests commands from Trash.
*/
public class TestTrash extends TestCase {
private final static Path TEST_DIR =
new Path(new File(System.getProperty("test.build.data","/tmp")
).toURI().toString().replace(' ', '+'), "testTrash");
protected static Path mkdir(FileSystem fs, Path p) throws IOException {
assertTrue(fs.mkdirs(p));
assertTrue(fs.exists(p));
assertTrue(fs.getFileStatus(p).isDirectory());
return p;
}
// check that the specified file is in Trash
protected static void checkTrash(FileSystem trashFs, Path trashRoot,
Path path) throws IOException {
Path p = Path.mergePaths(trashRoot, path);
assertTrue("Could not find file in trash: "+ p , trashFs.exists(p));
}
// counts how many instances of the file are in the Trash
  // they are all named with the pattern fileName*
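  // e.g. deleting "myFile" ten times leaves ten entries in the trash whose
  // names all start with "myFile" (later duplicates get a generated suffix)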
protected static int countSameDeletedFiles(FileSystem fs,
Path trashDir, Path fileName) throws IOException {
final String prefix = fileName.getName();
System.out.println("Counting " + fileName + " in " + trashDir.toString());
// filter that matches all the files that start with fileName*
PathFilter pf = new PathFilter() {
@Override
public boolean accept(Path file) {
return file.getName().startsWith(prefix);
}
};
// run the filter
FileStatus [] fss = fs.listStatus(trashDir, pf);
return fss==null? 0 : fss.length;
}
// check that the specified file is not in Trash
static void checkNotInTrash(FileSystem fs, Path trashRoot, String pathname)
throws IOException {
Path p = new Path(trashRoot+"/"+ new Path(pathname).getName());
assertTrue(!fs.exists(p));
}
/**
* Test trash for the shell's delete command for the file system fs
* @param fs
* @param base - the base path where files are created
* @throws IOException
*/
public static void trashShell(final FileSystem fs, final Path base)
throws IOException {
Configuration conf = new Configuration();
conf.set("fs.defaultFS", fs.getUri().toString());
trashShell(conf, base, null, null);
}
/**
* Test trash for the shell's delete command for the default file system
   * specified in the parameter conf
   * @param conf
   * @param base - the base path where files are created
   * @param trashRootFs - the file system that hosts the trash root
   * @param trashRoot - the expected place where the trash bin resides
* @throws IOException
*/
public static void trashShell(final Configuration conf, final Path base,
FileSystem trashRootFs, Path trashRoot)
throws IOException {
FileSystem fs = FileSystem.get(conf);
conf.setLong(FS_TRASH_INTERVAL_KEY, 0); // disabled
assertFalse(new Trash(conf).isEnabled());
conf.setLong(FS_TRASH_INTERVAL_KEY, 10); // 10 minute
assertTrue(new Trash(conf).isEnabled());
FsShell shell = new FsShell();
shell.setConf(conf);
if (trashRoot == null) {
trashRoot = shell.getCurrentTrashDir();
}
if (trashRootFs == null) {
trashRootFs = fs;
}
// First create a new directory with mkdirs
Path myPath = new Path(base, "test/mkdirs");
mkdir(fs, myPath);
// Second, create a file in that directory.
Path myFile = new Path(base, "test/mkdirs/myFile");
writeFile(fs, myFile, 10);
// Verify that expunge without Trash directory
// won't throw Exception
{
String[] args = new String[1];
args[0] = "-expunge";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from Trash.run " +
e.getLocalizedMessage());
}
assertTrue(val == 0);
}
// Verify that we succeed in removing the file we created.
// This should go into Trash.
{
String[] args = new String[2];
args[0] = "-rm";
args[1] = myFile.toString();
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from Trash.run " +
e.getLocalizedMessage());
}
assertTrue(val == 0);
checkTrash(trashRootFs, trashRoot, fs.makeQualified(myFile));
}
// Verify that we can recreate the file
writeFile(fs, myFile, 10);
// Verify that we succeed in removing the file we re-created
{
String[] args = new String[2];
args[0] = "-rm";
args[1] = new Path(base, "test/mkdirs/myFile").toString();
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from Trash.run " +
e.getLocalizedMessage());
}
assertTrue(val == 0);
}
// Verify that we can recreate the file
writeFile(fs, myFile, 10);
// Verify that we succeed in removing the whole directory
// along with the file inside it.
{
String[] args = new String[2];
args[0] = "-rmr";
args[1] = new Path(base, "test/mkdirs").toString();
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from Trash.run " +
e.getLocalizedMessage());
}
assertTrue(val == 0);
}
// recreate directory
mkdir(fs, myPath);
// Verify that we succeed in removing the whole directory
{
String[] args = new String[2];
args[0] = "-rmr";
args[1] = new Path(base, "test/mkdirs").toString();
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from Trash.run " +
e.getLocalizedMessage());
}
assertTrue(val == 0);
}
// Check that we can delete a file from the trash
{
Path toErase = new Path(trashRoot, "toErase");
int retVal = -1;
writeFile(trashRootFs, toErase, 10);
try {
retVal = shell.run(new String[] {"-rm", toErase.toString()});
} catch (Exception e) {
System.err.println("Exception raised from Trash.run " +
e.getLocalizedMessage());
}
assertTrue(retVal == 0);
checkNotInTrash (trashRootFs, trashRoot, toErase.toString());
checkNotInTrash (trashRootFs, trashRoot, toErase.toString()+".1");
}
// simulate Trash removal
{
String[] args = new String[1];
args[0] = "-expunge";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from Trash.run " +
e.getLocalizedMessage());
}
assertTrue(val == 0);
}
// verify that after expunging the Trash, it really goes away
checkNotInTrash(trashRootFs, trashRoot, new Path(base, "test/mkdirs/myFile").toString());
// recreate directory and file
mkdir(fs, myPath);
writeFile(fs, myFile, 10);
// remove file first, then remove directory
{
String[] args = new String[2];
args[0] = "-rm";
args[1] = myFile.toString();
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from Trash.run " +
e.getLocalizedMessage());
}
assertTrue(val == 0);
checkTrash(trashRootFs, trashRoot, myFile);
args = new String[2];
args[0] = "-rmr";
args[1] = myPath.toString();
val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from Trash.run " +
e.getLocalizedMessage());
}
assertTrue(val == 0);
checkTrash(trashRootFs, trashRoot, myPath);
}
// attempt to remove parent of trash
{
String[] args = new String[2];
args[0] = "-rmr";
args[1] = trashRoot.getParent().getParent().toString();
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from Trash.run " +
e.getLocalizedMessage());
}
assertEquals("exit code", 1, val);
assertTrue(trashRootFs.exists(trashRoot));
}
// Verify skip trash option really works
// recreate directory and file
mkdir(fs, myPath);
writeFile(fs, myFile, 10);
// Verify that skip trash option really skips the trash for files (rm)
{
String[] args = new String[3];
args[0] = "-rm";
args[1] = "-skipTrash";
args[2] = myFile.toString();
int val = -1;
try {
// Clear out trash
assertEquals("-expunge failed",
0, shell.run(new String [] { "-expunge" } ));
val = shell.run(args);
}catch (Exception e) {
System.err.println("Exception raised from Trash.run " +
e.getLocalizedMessage());
}
assertFalse("Expected TrashRoot (" + trashRoot +
") to exist in file system:"
+ trashRootFs.getUri(),
trashRootFs.exists(trashRoot)); // No new Current should be created
assertFalse(fs.exists(myFile));
assertTrue(val == 0);
}
// recreate directory and file
mkdir(fs, myPath);
writeFile(fs, myFile, 10);
// Verify that skip trash option really skips the trash for rmr
{
String[] args = new String[3];
args[0] = "-rmr";
args[1] = "-skipTrash";
args[2] = myPath.toString();
int val = -1;
try {
// Clear out trash
assertEquals(0, shell.run(new String [] { "-expunge" } ));
val = shell.run(args);
}catch (Exception e) {
System.err.println("Exception raised from Trash.run " +
e.getLocalizedMessage());
}
assertFalse(trashRootFs.exists(trashRoot)); // No new Current should be created
assertFalse(fs.exists(myPath));
assertFalse(fs.exists(myFile));
assertTrue(val == 0);
}
// deleting same file multiple times
{
int val = -1;
mkdir(fs, myPath);
try {
assertEquals(0, shell.run(new String [] { "-expunge" } ));
} catch (Exception e) {
System.err.println("Exception raised from fs expunge " +
e.getLocalizedMessage());
}
// create a file in that directory.
myFile = new Path(base, "test/mkdirs/myFile");
String [] args = new String[] {"-rm", myFile.toString()};
int num_runs = 10;
for(int i=0;i<num_runs; i++) {
//create file
writeFile(fs, myFile, 10);
// delete file
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from Trash.run " +
e.getLocalizedMessage());
}
assertTrue(val==0);
}
// current trash directory
Path trashDir = Path.mergePaths(new Path(trashRoot.toUri().getPath()),
new Path(myFile.getParent().toUri().getPath()));
System.out.println("Deleting same myFile: myFile.parent=" + myFile.getParent().toUri().getPath() +
"; trashroot="+trashRoot.toUri().getPath() +
"; trashDir=" + trashDir.toUri().getPath());
int count = countSameDeletedFiles(fs, trashDir, myFile);
System.out.println("counted " + count + " files " + myFile.getName() + "* in " + trashDir);
assertTrue(count==num_runs);
}
    // Verify that -skipTrash is suggested when rm fails and the option
    // was not supplied
{
String[] args = new String[2];
args[0] = "-rmr";
args[1] = "/"; //This always contains trash directory
PrintStream stdout = System.out;
PrintStream stderr = System.err;
ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
PrintStream newOut = new PrintStream(byteStream);
System.setOut(newOut);
System.setErr(newOut);
try {
shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from Trash.run " +
e.getLocalizedMessage());
}
String output = byteStream.toString();
System.setOut(stdout);
System.setErr(stderr);
assertTrue("skipTrash wasn't suggested as remedy to failed rm command" +
" or we deleted / even though we could not get server defaults",
output.indexOf("Consider using -skipTrash option") != -1 ||
output.indexOf("Failed to determine server trash configuration") != -1);
}
// Verify old checkpoint format is recognized
{
// emulate two old trash checkpoint directories, one that is old enough
// to be deleted on the next expunge and one that isn't.
long trashInterval = conf.getLong(FS_TRASH_INTERVAL_KEY,
FS_TRASH_INTERVAL_DEFAULT);
long now = Time.now();
DateFormat oldCheckpointFormat = new SimpleDateFormat("yyMMddHHmm");
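      // e.g. under this old format a checkpoint taken at 10:30 on
      // 2014-02-01 would be named "1402011030"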
Path dirToDelete = new Path(trashRoot.getParent(),
oldCheckpointFormat.format(now - (trashInterval * 60 * 1000) - 1));
Path dirToKeep = new Path(trashRoot.getParent(),
oldCheckpointFormat.format(now));
mkdir(trashRootFs, dirToDelete);
mkdir(trashRootFs, dirToKeep);
// Clear out trash
int rc = -1;
try {
rc = shell.run(new String [] { "-expunge" } );
} catch (Exception e) {
System.err.println("Exception raised from fs expunge " +
e.getLocalizedMessage());
}
assertEquals(0, rc);
assertFalse("old checkpoint format not recognized",
trashRootFs.exists(dirToDelete));
assertTrue("old checkpoint format directory should not be removed",
trashRootFs.exists(dirToKeep));
}
}
public static void trashNonDefaultFS(Configuration conf) throws IOException {
conf.setLong(FS_TRASH_INTERVAL_KEY, 10); // 10 minute
// attempt non-default FileSystem trash
{
final FileSystem lfs = FileSystem.getLocal(conf);
Path p = TEST_DIR;
Path f = new Path(p, "foo/bar");
if (lfs.exists(p)) {
lfs.delete(p, true);
}
try {
writeFile(lfs, f, 10);
FileSystem.closeAll();
FileSystem localFs = FileSystem.get(URI.create("file:///"), conf);
Trash lTrash = new Trash(localFs, conf);
lTrash.moveToTrash(f.getParent());
checkTrash(localFs, lTrash.getCurrentTrashDir(), f);
} finally {
if (lfs.exists(p)) {
lfs.delete(p, true);
}
}
}
}
public void testTrash() throws IOException {
Configuration conf = new Configuration();
conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
trashShell(FileSystem.getLocal(conf), TEST_DIR);
}
public void testNonDefaultFS() throws IOException {
Configuration conf = new Configuration();
conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
conf.set("fs.defaultFS", "invalid://host/bar/foo");
trashNonDefaultFS(conf);
}
public void testPluggableTrash() throws IOException {
Configuration conf = new Configuration();
// Test plugged TrashPolicy
conf.setClass("fs.trash.classname", TestTrashPolicy.class, TrashPolicy.class);
Trash trash = new Trash(conf);
assertTrue(trash.getTrashPolicy().getClass().equals(TestTrashPolicy.class));
}
public void testTrashEmptier() throws Exception {
Configuration conf = new Configuration();
    // Trash with 12-second deletes and 6-second checkpoints
conf.set(FS_TRASH_INTERVAL_KEY, "0.2"); // 12 seconds
conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
conf.set(FS_TRASH_CHECKPOINT_INTERVAL_KEY, "0.1"); // 6 seconds
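    // Both interval keys are expressed in minutes, hence
    // 0.2 min * 60 = 12 s and 0.1 min * 60 = 6 s.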
FileSystem fs = FileSystem.getLocal(conf);
conf.set("fs.default.name", fs.getUri().toString());
Trash trash = new Trash(conf);
// Start Emptier in background
Runnable emptier = trash.getEmptier();
Thread emptierThread = new Thread(emptier);
emptierThread.start();
FsShell shell = new FsShell();
shell.setConf(conf);
shell.init();
// First create a new directory with mkdirs
Path myPath = new Path(TEST_DIR, "test/mkdirs");
mkdir(fs, myPath);
int fileIndex = 0;
Set<String> checkpoints = new HashSet<String>();
while (true) {
// Create a file with a new name
Path myFile = new Path(TEST_DIR, "test/mkdirs/myFile" + fileIndex++);
writeFile(fs, myFile, 10);
// Delete the file to trash
String[] args = new String[2];
args[0] = "-rm";
args[1] = myFile.toString();
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from Trash.run " +
e.getLocalizedMessage());
}
assertTrue(val == 0);
Path trashDir = shell.getCurrentTrashDir();
FileStatus files[] = fs.listStatus(trashDir.getParent());
// Scan files in .Trash and add them to set of checkpoints
for (FileStatus file : files) {
String fileName = file.getPath().getName();
checkpoints.add(fileName);
}
// If checkpoints has 4 objects it is Current + 3 checkpoint directories
if (checkpoints.size() == 4) {
// The actual contents should be smaller since the last checkpoint
// should've been deleted and Current might not have been recreated yet
assertTrue(checkpoints.size() > files.length);
break;
}
Thread.sleep(5000);
}
emptierThread.interrupt();
emptierThread.join();
}
/**
* @see TestCase#tearDown()
*/
@Override
protected void tearDown() throws IOException {
File trashDir = new File(TEST_DIR.toUri().getPath());
if (trashDir.exists() && !FileUtil.fullyDelete(trashDir)) {
throw new IOException("Cannot remove data directory: " + trashDir);
}
}
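  /**
   * LocalFileSystem whose home directory is redirected under TEST_DIR, so
   * the per-user trash directory (which lives under the home directory)
   * stays inside the test's working area.
   */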
static class TestLFS extends LocalFileSystem {
Path home;
TestLFS() {
this(new Path(TEST_DIR, "user/test"));
}
TestLFS(final Path home) {
super(new RawLocalFileSystem() {
@Override
protected Path getInitialWorkingDirectory() {
return makeQualified(home);
}
@Override
public Path getHomeDirectory() {
return makeQualified(home);
}
});
this.home = home;
}
@Override
public Path getHomeDirectory() {
return home;
}
}
/**
   * Test deleting the same file multiple times.
   * This is more of a performance test and shouldn't be run as a unit test.
* @throws IOException
*/
public static void performanceTestDeleteSameFile() throws IOException{
Path base = TEST_DIR;
Configuration conf = new Configuration();
conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
FileSystem fs = FileSystem.getLocal(conf);
conf.set("fs.defaultFS", fs.getUri().toString());
conf.setLong(FS_TRASH_INTERVAL_KEY, 10); //minutes..
FsShell shell = new FsShell();
shell.setConf(conf);
//Path trashRoot = null;
Path myPath = new Path(base, "test/mkdirs");
mkdir(fs, myPath);
// create a file in that directory.
Path myFile;
long start;
long first = 0;
int retVal = 0;
    int factor = 10; // how much slower any subsequent deletion may be
myFile = new Path(base, "test/mkdirs/myFile");
String [] args = new String[] {"-rm", myFile.toString()};
int iters = 1000;
for(int i=0;i<iters; i++) {
writeFile(fs, myFile, 10);
start = Time.now();
try {
retVal = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from Trash.run " +
e.getLocalizedMessage());
throw new IOException(e.getMessage());
}
assertTrue(retVal == 0);
long iterTime = Time.now() - start;
      // use a running average of the first 10 runs as the baseline
if(i<10) {
if(i==0) {
first = iterTime;
}
else {
first = (first + iterTime)/2;
}
}
// we don't want to print every iteration - let's do every 10th
int print_freq = iters/10;
if(i>10) {
if((i%print_freq) == 0)
System.out.println("iteration="+i+";res =" + retVal + "; start=" + start
+ "; iterTime = " + iterTime + " vs. firstTime=" + first);
long factoredTime = first*factor;
      assertTrue(iterTime < factoredTime); // no more than 'factor' times the baseline
}
}
}
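  /**
   * Illustrative sketch of the timing baseline computed inline above: a
   * pairwise decaying average over the first few samples. Not part of the
   * original test; the method name is hypothetical and at least one sample
   * is assumed.
   */
  static long decayingAverageBaseline(long[] samples) {
    long baseline = samples[0];
    for (int i = 1; i < samples.length; i++) {
      baseline = (baseline + samples[i]) / 2; // halve the weight of older samples
    }
    return baseline;
  }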
public static void main(String [] arg) throws IOException{
// run performance piece as a separate test
performanceTestDeleteSameFile();
}
// Test TrashPolicy. Don't care about implementation.
public static class TestTrashPolicy extends TrashPolicy {
public TestTrashPolicy() { }
@Override
public void initialize(Configuration conf, FileSystem fs, Path home) {
}
@Override
public boolean isEnabled() {
return false;
}
@Override
public boolean moveToTrash(Path path) throws IOException {
return false;
}
@Override
public void createCheckpoint() throws IOException {
}
@Override
public void deleteCheckpoint() throws IOException {
}
@Override
public Path getCurrentTrashDir() {
return null;
}
@Override
public Runnable getEmptier() throws IOException {
return null;
}
}
}
| 22,938 | 30.552957 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextDeleteOnExit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.util.Set;
import org.junit.Assert;
import org.apache.hadoop.util.ShutdownHookManager;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.apache.hadoop.fs.FileContextTestHelper.*;
/**
 * Tests {@link FileContext#deleteOnExit(Path)} functionality.
*/
public class TestFileContextDeleteOnExit {
private static int blockSize = 1024;
private static int numBlocks = 2;
private final FileContextTestHelper helper = new FileContextTestHelper();
private FileContext fc;
@Before
public void setup() throws IOException {
fc = FileContext.getLocalFSFileContext();
}
@After
public void tearDown() throws IOException {
fc.delete(helper.getTestRootPath(fc), true);
}
private void checkDeleteOnExitData(int size, FileContext fc, Path... paths) {
Assert.assertEquals(size, FileContext.DELETE_ON_EXIT.size());
Set<Path> set = FileContext.DELETE_ON_EXIT.get(fc);
Assert.assertEquals(paths.length, (set == null ? 0 : set.size()));
for (Path path : paths) {
Assert.assertTrue(set.contains(path));
}
}
@Test
public void testDeleteOnExit() throws Exception {
// Create deleteOnExit entries
Path file1 = helper.getTestRootPath(fc, "file1");
createFile(fc, file1, numBlocks, blockSize);
fc.deleteOnExit(file1);
checkDeleteOnExitData(1, fc, file1);
// Ensure shutdown hook is added
Assert.assertTrue(ShutdownHookManager.get().hasShutdownHook(FileContext.FINALIZER));
Path file2 = helper.getTestRootPath(fc, "dir1/file2");
createFile(fc, file2, numBlocks, blockSize);
fc.deleteOnExit(file2);
checkDeleteOnExitData(1, fc, file1, file2);
Path dir = helper.getTestRootPath(fc, "dir3/dir4/dir5/dir6");
createFile(fc, dir, numBlocks, blockSize);
fc.deleteOnExit(dir);
checkDeleteOnExitData(1, fc, file1, file2, dir);
// trigger deleteOnExit and ensure the registered
// paths are cleaned up
FileContext.FINALIZER.run();
checkDeleteOnExitData(0, fc, new Path[0]);
Assert.assertFalse(exists(fc, file1));
Assert.assertFalse(exists(fc, file2));
Assert.assertFalse(exists(fc, dir));
}
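  /**
   * Illustrative usage sketch, not an original test: register a scratch file
   * so that the FileContext finalizer removes it at JVM shutdown. The method
   * name is hypothetical.
   */
  private void registerScratchFile(FileContext context) throws IOException {
    Path scratch = helper.getTestRootPath(context, "scratch");
    createFile(context, scratch, numBlocks, blockSize);
    context.deleteOnExit(scratch);
  }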
}
| 3,062 | 32.659341 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Level;
import static org.junit.Assert.*;
import org.junit.Test;
import org.junit.BeforeClass;
/**
* This class tests the FileStatus API.
*/
public class TestListFiles {
{
((Log4JLogger)FileSystem.LOG).getLogger().setLevel(Level.ALL);
}
static final long seed = 0xDEADBEEFL;
final protected static Configuration conf = new Configuration();
protected static FileSystem fs;
protected static Path TEST_DIR;
final private static int FILE_LEN = 10;
private static Path FILE1;
private static Path DIR1;
private static Path FILE2;
private static Path FILE3;
static {
setTestPaths(new Path(
System.getProperty("test.build.data", "build/test/data/work-dir/localfs"),
"main_"));
}
protected static Path getTestDir() {
return TEST_DIR;
}
/**
* Sets the root testing directory and reinitializes any additional test paths
* that are under the root. This method is intended to be called from a
* subclass's @BeforeClass method if there is a need to override the testing
* directory.
*
* @param testDir Path root testing directory
*/
protected static void setTestPaths(Path testDir) {
TEST_DIR = testDir;
FILE1 = new Path(TEST_DIR, "file1");
DIR1 = new Path(TEST_DIR, "dir1");
FILE2 = new Path(DIR1, "file2");
FILE3 = new Path(DIR1, "file3");
}
@BeforeClass
public static void testSetUp() throws Exception {
fs = FileSystem.getLocal(conf);
fs.delete(TEST_DIR, true);
}
private static void writeFile(FileSystem fileSys, Path name, int fileSize)
throws IOException {
    // Create and write a file of the given size filled with random data
FSDataOutputStream stm = fileSys.create(name);
byte[] buffer = new byte[fileSize];
Random rand = new Random(seed);
rand.nextBytes(buffer);
stm.write(buffer);
stm.close();
}
/** Test when input path is a file */
@Test
public void testFile() throws IOException {
fs.mkdirs(TEST_DIR);
writeFile(fs, FILE1, FILE_LEN);
RemoteIterator<LocatedFileStatus> itor = fs.listFiles(
FILE1, true);
LocatedFileStatus stat = itor.next();
assertFalse(itor.hasNext());
assertTrue(stat.isFile());
assertEquals(FILE_LEN, stat.getLen());
assertEquals(fs.makeQualified(FILE1), stat.getPath());
assertEquals(1, stat.getBlockLocations().length);
itor = fs.listFiles(FILE1, false);
stat = itor.next();
assertFalse(itor.hasNext());
assertTrue(stat.isFile());
assertEquals(FILE_LEN, stat.getLen());
assertEquals(fs.makeQualified(FILE1), stat.getPath());
assertEquals(1, stat.getBlockLocations().length);
fs.delete(FILE1, true);
}
/** Test when input path is a directory */
@Test
public void testDirectory() throws IOException {
fs.mkdirs(DIR1);
// test empty directory
RemoteIterator<LocatedFileStatus> itor = fs.listFiles(
DIR1, true);
assertFalse(itor.hasNext());
itor = fs.listFiles(DIR1, false);
assertFalse(itor.hasNext());
// testing directory with 1 file
writeFile(fs, FILE2, FILE_LEN);
itor = fs.listFiles(DIR1, true);
LocatedFileStatus stat = itor.next();
assertFalse(itor.hasNext());
assertTrue(stat.isFile());
assertEquals(FILE_LEN, stat.getLen());
assertEquals(fs.makeQualified(FILE2), stat.getPath());
assertEquals(1, stat.getBlockLocations().length);
itor = fs.listFiles(DIR1, false);
stat = itor.next();
assertFalse(itor.hasNext());
assertTrue(stat.isFile());
assertEquals(FILE_LEN, stat.getLen());
assertEquals(fs.makeQualified(FILE2), stat.getPath());
assertEquals(1, stat.getBlockLocations().length);
// test more complicated directory
writeFile(fs, FILE1, FILE_LEN);
writeFile(fs, FILE3, FILE_LEN);
Set<Path> filesToFind = new HashSet<Path>();
filesToFind.add(fs.makeQualified(FILE1));
filesToFind.add(fs.makeQualified(FILE2));
filesToFind.add(fs.makeQualified(FILE3));
itor = fs.listFiles(TEST_DIR, true);
stat = itor.next();
assertTrue(stat.isFile());
assertTrue("Path " + stat.getPath() + " unexpected",
filesToFind.remove(stat.getPath()));
stat = itor.next();
assertTrue(stat.isFile());
assertTrue("Path " + stat.getPath() + " unexpected",
filesToFind.remove(stat.getPath()));
stat = itor.next();
assertTrue(stat.isFile());
assertTrue("Path " + stat.getPath() + " unexpected",
filesToFind.remove(stat.getPath()));
assertFalse(itor.hasNext());
assertTrue(filesToFind.isEmpty());
itor = fs.listFiles(TEST_DIR, false);
stat = itor.next();
assertTrue(stat.isFile());
assertEquals(fs.makeQualified(FILE1), stat.getPath());
assertFalse(itor.hasNext());
fs.delete(TEST_DIR, true);
}
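  /**
   * Illustrative sketch, not an original helper: drain a RemoteIterator of
   * located statuses into a set of paths, handy for the order-insensitive
   * membership checks above. The method name is hypothetical.
   */
  private static Set<Path> collectPaths(RemoteIterator<LocatedFileStatus> itor)
      throws IOException {
    Set<Path> found = new HashSet<Path>();
    while (itor.hasNext()) {
      found.add(itor.next().getPath());
    }
    return found;
  }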
}
| 5,983 | 30.329843 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Test;
/**
* Test symbolic links using LocalFs.
*/
abstract public class TestSymlinkLocalFS extends SymlinkBaseTest {
// Workaround for HADOOP-9652
static {
RawLocalFileSystem.useStatIfAvailable();
}
@Override
protected String getScheme() {
return "file";
}
@Override
protected String testBaseDir1() throws IOException {
return wrapper.getAbsoluteTestRootDir()+"/test1";
}
@Override
protected String testBaseDir2() throws IOException {
return wrapper.getAbsoluteTestRootDir()+"/test2";
}
@Override
protected URI testURI() {
try {
return new URI("file:///");
} catch (URISyntaxException e) {
return null;
}
}
@Override
protected boolean emulatingSymlinksOnWindows() {
    // Java 6 on Windows has very poor symlink support. Specifically,
    // File#length and File#renameTo do not work as expected.
// (see HADOOP-9061 for additional details)
// Hence some symlink tests will be skipped.
//
return (Shell.WINDOWS && !Shell.isJava7OrAbove());
}
@Override
public void testCreateDanglingLink() throws IOException {
// Dangling symlinks are not supported on Windows local file system.
assumeTrue(!Path.WINDOWS);
super.testCreateDanglingLink();
}
@Override
public void testCreateFileViaDanglingLinkParent() throws IOException {
assumeTrue(!Path.WINDOWS);
super.testCreateFileViaDanglingLinkParent();
}
@Override
public void testOpenResolvesLinks() throws IOException {
assumeTrue(!Path.WINDOWS);
super.testOpenResolvesLinks();
}
@Override
public void testRecursiveLinks() throws IOException {
assumeTrue(!Path.WINDOWS);
super.testRecursiveLinks();
}
@Override
public void testRenameDirToDanglingSymlink() throws IOException {
assumeTrue(!Path.WINDOWS);
super.testRenameDirToDanglingSymlink();
}
@Override
public void testStatDanglingLink() throws IOException {
assumeTrue(!Path.WINDOWS);
super.testStatDanglingLink();
}
@Test(timeout=1000)
  /** lstat a non-existent file using a partially qualified path */
public void testDanglingLinkFilePartQual() throws IOException {
Path filePartQual = new Path(getScheme()+":///doesNotExist");
try {
wrapper.getFileLinkStatus(filePartQual);
fail("Got FileStatus for non-existant file");
} catch (FileNotFoundException f) {
// Expected
}
try {
wrapper.getLinkTarget(filePartQual);
fail("Got link target for non-existant file");
} catch (FileNotFoundException f) {
// Expected
}
}
@Test(timeout=1000)
/** Stat and lstat a dangling link */
public void testDanglingLink() throws IOException {
assumeTrue(!Path.WINDOWS);
Path fileAbs = new Path(testBaseDir1()+"/file");
Path fileQual = new Path(testURI().toString(), fileAbs);
Path link = new Path(testBaseDir1()+"/linkToFile");
Path linkQual = new Path(testURI().toString(), link.toString());
wrapper.createSymlink(fileAbs, link, false);
// Deleting the link using FileContext currently fails because
// resolve looks up LocalFs rather than RawLocalFs for the path
// so we call ChecksumFs delete (which doesn't delete dangling
// links) instead of delegating to delete in RawLocalFileSystem
// which deletes via fullyDelete. testDeleteLink above works
// because the link is not dangling.
//assertTrue(fc.delete(link, false));
FileUtil.fullyDelete(new File(link.toUri().getPath()));
wrapper.createSymlink(fileAbs, link, false);
try {
wrapper.getFileStatus(link);
fail("Got FileStatus for dangling link");
} catch (FileNotFoundException f) {
// Expected. File's exists method returns false for dangling links
}
// We can stat a dangling link
UserGroupInformation user = UserGroupInformation.getCurrentUser();
FileStatus fsd = wrapper.getFileLinkStatus(link);
assertEquals(fileQual, fsd.getSymlink());
assertTrue(fsd.isSymlink());
assertFalse(fsd.isDirectory());
assertEquals(user.getUserName(), fsd.getOwner());
// Compare against user's primary group
assertEquals(user.getGroupNames()[0], fsd.getGroup());
assertEquals(linkQual, fsd.getPath());
// Accessing the link
try {
readFile(link);
fail("Got FileStatus for dangling link");
} catch (FileNotFoundException f) {
// Ditto.
}
// Creating the file makes the link work
createAndWriteFile(fileAbs);
wrapper.getFileStatus(link);
}
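  /**
   * Illustrative sketch, not an original helper: remove a possibly dangling
   * local symlink by deleting the link file itself rather than going through
   * FileContext, for the reason described in the comments above. The method
   * name is hypothetical.
   */
  private static void deleteLocalLink(Path link) {
    FileUtil.fullyDelete(new File(link.toUri().getPath()));
  }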
@Test(timeout=1000)
/**
* Test getLinkTarget with a partially qualified target.
* NB: Hadoop does not support fully qualified URIs for the
* file scheme (eg file://host/tmp/test).
*/
public void testGetLinkStatusPartQualTarget() throws IOException {
assumeTrue(!emulatingSymlinksOnWindows());
Path fileAbs = new Path(testBaseDir1()+"/file");
Path fileQual = new Path(testURI().toString(), fileAbs);
Path dir = new Path(testBaseDir1());
Path link = new Path(testBaseDir1()+"/linkToFile");
Path dirNew = new Path(testBaseDir2());
Path linkNew = new Path(testBaseDir2()+"/linkToFile");
wrapper.delete(dirNew, true);
createAndWriteFile(fileQual);
wrapper.setWorkingDirectory(dir);
// Link target is partially qualified, we get the same back.
wrapper.createSymlink(fileQual, link, false);
assertEquals(fileQual, wrapper.getFileLinkStatus(link).getSymlink());
// Because the target was specified with an absolute path the
// link fails to resolve after moving the parent directory.
wrapper.rename(dir, dirNew);
// The target is still the old path
assertEquals(fileQual, wrapper.getFileLinkStatus(linkNew).getSymlink());
try {
readFile(linkNew);
fail("The link should be dangling now.");
} catch (FileNotFoundException x) {
// Expected.
}
// RawLocalFs only maintains the path part, not the URI, and
// therefore does not support links to other file systems.
Path anotherFs = new Path("hdfs://host:1000/dir/file");
FileUtil.fullyDelete(new File(linkNew.toString()));
try {
wrapper.createSymlink(anotherFs, linkNew, false);
fail("Created a local fs link to a non-local fs");
} catch (IOException x) {
      // Expected.
}
}
/** Test create symlink to . */
@Override
public void testCreateLinkToDot() throws IOException {
try {
super.testCreateLinkToDot();
} catch (IllegalArgumentException iae) {
// Expected.
}
}
@Override
public void testSetTimesSymlinkToFile() throws IOException {
assumeTrue(!Path.WINDOWS);
super.testSetTimesSymlinkToFile();
}
@Override
public void testSetTimesSymlinkToDir() throws IOException {
assumeTrue(!Path.WINDOWS);
super.testSetTimesSymlinkToDir();
}
@Override
public void testSetTimesDanglingLink() throws IOException {
assumeTrue(!Path.WINDOWS);
super.testSetTimesDanglingLink();
}
}
| 8,375 | 32.106719 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.junit.Test;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.AvroTestUtil;
import org.apache.hadoop.util.Shell;
import com.google.common.base.Joiner;
import junit.framework.TestCase;
public class TestPath extends TestCase {
/**
* Merge a bunch of Path objects into a sorted semicolon-separated
* path string.
*/
public static String mergeStatuses(Path paths[]) {
String pathStrings[] = new String[paths.length];
int i = 0;
for (Path path : paths) {
pathStrings[i++] = path.toUri().getPath();
}
Arrays.sort(pathStrings);
return Joiner.on(";").join(pathStrings);
}
/**
* Merge a bunch of FileStatus objects into a sorted semicolon-separated
* path string.
*/
public static String mergeStatuses(FileStatus statuses[]) {
Path paths[] = new Path[statuses.length];
int i = 0;
for (FileStatus status : statuses) {
paths[i++] = status.getPath();
}
return mergeStatuses(paths);
}
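  /**
   * Illustrative usage sketch, not an original assertion: the merged strings
   * allow an order-insensitive comparison of two path collections. The method
   * name is hypothetical.
   */
  public static void assertSamePaths(Path expected[], FileStatus actual[]) {
    assertEquals(mergeStatuses(expected), mergeStatuses(actual));
  }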
@Test (timeout = 30000)
public void testToString() {
toStringTest("/");
toStringTest("/foo");
toStringTest("/foo/bar");
toStringTest("foo");
toStringTest("foo/bar");
toStringTest("/foo/bar#boo");
toStringTest("foo/bar#boo");
boolean emptyException = false;
try {
toStringTest("");
} catch (IllegalArgumentException e) {
// expect to receive an IllegalArgumentException
emptyException = true;
}
assertTrue(emptyException);
if (Path.WINDOWS) {
toStringTest("c:");
toStringTest("c:/");
toStringTest("c:foo");
toStringTest("c:foo/bar");
toStringTest("c:foo/bar");
toStringTest("c:/foo/bar");
toStringTest("C:/foo/bar#boo");
toStringTest("C:foo/bar#boo");
}
}
private void toStringTest(String pathString) {
assertEquals(pathString, new Path(pathString).toString());
}
@Test (timeout = 30000)
public void testNormalize() throws URISyntaxException {
assertEquals("", new Path(".").toString());
assertEquals("..", new Path("..").toString());
assertEquals("/", new Path("/").toString());
assertEquals("/", new Path("//").toString());
assertEquals("/", new Path("///").toString());
assertEquals("//foo/", new Path("//foo/").toString());
assertEquals("//foo/", new Path("//foo//").toString());
assertEquals("//foo/bar", new Path("//foo//bar").toString());
assertEquals("/foo", new Path("/foo/").toString());
assertEquals("/foo", new Path("/foo/").toString());
assertEquals("foo", new Path("foo/").toString());
assertEquals("foo", new Path("foo//").toString());
assertEquals("foo/bar", new Path("foo//bar").toString());
assertEquals("hdfs://foo/foo2/bar/baz/",
new Path(new URI("hdfs://foo//foo2///bar/baz///")).toString());
if (Path.WINDOWS) {
assertEquals("c:/a/b", new Path("c:\\a\\b").toString());
}
}
@Test (timeout = 30000)
public void testIsAbsolute() {
assertTrue(new Path("/").isAbsolute());
assertTrue(new Path("/foo").isAbsolute());
assertFalse(new Path("foo").isAbsolute());
assertFalse(new Path("foo/bar").isAbsolute());
assertFalse(new Path(".").isAbsolute());
if (Path.WINDOWS) {
assertTrue(new Path("c:/a/b").isAbsolute());
assertFalse(new Path("c:a/b").isAbsolute());
}
}
@Test (timeout = 30000)
public void testParent() {
assertEquals(new Path("/foo"), new Path("/foo/bar").getParent());
assertEquals(new Path("foo"), new Path("foo/bar").getParent());
assertEquals(new Path("/"), new Path("/foo").getParent());
assertEquals(null, new Path("/").getParent());
if (Path.WINDOWS) {
assertEquals(new Path("c:/"), new Path("c:/foo").getParent());
}
}
@Test (timeout = 30000)
public void testChild() {
assertEquals(new Path("."), new Path(".", "."));
assertEquals(new Path("/"), new Path("/", "."));
assertEquals(new Path("/"), new Path(".", "/"));
assertEquals(new Path("/foo"), new Path("/", "foo"));
assertEquals(new Path("/foo/bar"), new Path("/foo", "bar"));
assertEquals(new Path("/foo/bar/baz"), new Path("/foo/bar", "baz"));
assertEquals(new Path("/foo/bar/baz"), new Path("/foo", "bar/baz"));
assertEquals(new Path("foo"), new Path(".", "foo"));
assertEquals(new Path("foo/bar"), new Path("foo", "bar"));
assertEquals(new Path("foo/bar/baz"), new Path("foo", "bar/baz"));
assertEquals(new Path("foo/bar/baz"), new Path("foo/bar", "baz"));
assertEquals(new Path("/foo"), new Path("/bar", "/foo"));
if (Path.WINDOWS) {
assertEquals(new Path("c:/foo"), new Path("/bar", "c:/foo"));
assertEquals(new Path("c:/foo"), new Path("d:/bar", "c:/foo"));
}
}
@Test (timeout = 30000)
  public void testPathThreeArgConstructor() {
assertEquals(new Path("foo"), new Path(null, null, "foo"));
assertEquals(new Path("scheme:///foo"), new Path("scheme", null, "/foo"));
assertEquals(
new Path("scheme://authority/foo"),
new Path("scheme", "authority", "/foo"));
if (Path.WINDOWS) {
assertEquals(new Path("c:/foo/bar"), new Path(null, null, "c:/foo/bar"));
assertEquals(new Path("c:/foo/bar"), new Path(null, null, "/c:/foo/bar"));
} else {
assertEquals(new Path("./a:b"), new Path(null, null, "a:b"));
}
// Resolution tests
if (Path.WINDOWS) {
assertEquals(
new Path("c:/foo/bar"),
new Path("/fou", new Path(null, null, "c:/foo/bar")));
assertEquals(
new Path("c:/foo/bar"),
new Path("/fou", new Path(null, null, "/c:/foo/bar")));
assertEquals(
new Path("/foo/bar"),
new Path("/foo", new Path(null, null, "bar")));
} else {
assertEquals(
new Path("/foo/bar/a:b"),
new Path("/foo/bar", new Path(null, null, "a:b")));
assertEquals(
new Path("/a:b"),
new Path("/foo/bar", new Path(null, null, "/a:b")));
}
}
@Test (timeout = 30000)
public void testEquals() {
assertFalse(new Path("/").equals(new Path("/foo")));
}
@Test (timeout = 30000)
public void testDots() {
// Test Path(String)
assertEquals(new Path("/foo/bar/baz").toString(), "/foo/bar/baz");
assertEquals(new Path("/foo/bar", ".").toString(), "/foo/bar");
assertEquals(new Path("/foo/bar/../baz").toString(), "/foo/baz");
assertEquals(new Path("/foo/bar/./baz").toString(), "/foo/bar/baz");
assertEquals(new Path("/foo/bar/baz/../../fud").toString(), "/foo/fud");
assertEquals(new Path("/foo/bar/baz/.././../fud").toString(), "/foo/fud");
assertEquals(new Path("../../foo/bar").toString(), "../../foo/bar");
assertEquals(new Path(".././../foo/bar").toString(), "../../foo/bar");
assertEquals(new Path("./foo/bar/baz").toString(), "foo/bar/baz");
assertEquals(new Path("/foo/bar/../../baz/boo").toString(), "/baz/boo");
assertEquals(new Path("foo/bar/").toString(), "foo/bar");
assertEquals(new Path("foo/bar/../baz").toString(), "foo/baz");
assertEquals(new Path("foo/bar/../../baz/boo").toString(), "baz/boo");
// Test Path(Path,Path)
assertEquals(new Path("/foo/bar", "baz/boo").toString(), "/foo/bar/baz/boo");
assertEquals(new Path("foo/bar/","baz/bud").toString(), "foo/bar/baz/bud");
assertEquals(new Path("/foo/bar","../../boo/bud").toString(), "/boo/bud");
assertEquals(new Path("foo/bar","../../boo/bud").toString(), "boo/bud");
assertEquals(new Path(".","boo/bud").toString(), "boo/bud");
assertEquals(new Path("/foo/bar/baz","../../boo/bud").toString(), "/foo/boo/bud");
assertEquals(new Path("foo/bar/baz","../../boo/bud").toString(), "foo/boo/bud");
assertEquals(new Path("../../","../../boo/bud").toString(), "../../../../boo/bud");
assertEquals(new Path("../../foo","../../../boo/bud").toString(), "../../../../boo/bud");
assertEquals(new Path("../../foo/bar","../boo/bud").toString(), "../../foo/boo/bud");
assertEquals(new Path("foo/bar/baz","../../..").toString(), "");
assertEquals(new Path("foo/bar/baz","../../../../..").toString(), "../..");
}
/** Test that Windows paths are correctly handled */
@Test (timeout = 5000)
public void testWindowsPaths() throws URISyntaxException, IOException {
if (!Path.WINDOWS) {
return;
}
assertEquals(new Path("c:\\foo\\bar").toString(), "c:/foo/bar");
assertEquals(new Path("c:/foo/bar").toString(), "c:/foo/bar");
assertEquals(new Path("/c:/foo/bar").toString(), "c:/foo/bar");
assertEquals(new Path("file://c:/foo/bar").toString(), "file://c:/foo/bar");
}
/** Test invalid paths on Windows are correctly rejected */
@Test (timeout = 5000)
public void testInvalidWindowsPaths() throws URISyntaxException, IOException {
if (!Path.WINDOWS) {
return;
}
String [] invalidPaths = {
"hdfs:\\\\\\tmp"
};
for (String path : invalidPaths) {
try {
Path item = new Path(path);
fail("Did not throw for invalid path " + path);
} catch (IllegalArgumentException iae) {
}
}
}
/** Test Path objects created from other Path objects */
@Test (timeout = 30000)
public void testChildParentResolution() throws URISyntaxException, IOException {
Path parent = new Path("foo1://bar1/baz1");
Path child = new Path("foo2://bar2/baz2");
assertEquals(child, new Path(parent, child));
}
@Test (timeout = 30000)
public void testScheme() throws java.io.IOException {
assertEquals("foo:/bar", new Path("foo:/","/bar").toString());
assertEquals("foo://bar/baz", new Path("foo://bar/","/baz").toString());
}
@Test (timeout = 30000)
public void testURI() throws URISyntaxException, IOException {
URI uri = new URI("file:///bar#baz");
Path path = new Path(uri);
assertTrue(uri.equals(new URI(path.toString())));
FileSystem fs = path.getFileSystem(new Configuration());
assertTrue(uri.equals(new URI(fs.makeQualified(path).toString())));
// uri without hash
URI uri2 = new URI("file:///bar/baz");
assertTrue(
uri2.equals(new URI(fs.makeQualified(new Path(uri2)).toString())));
assertEquals("foo://bar/baz#boo", new Path("foo://bar/", new Path(new URI(
"/baz#boo"))).toString());
assertEquals("foo://bar/baz/fud#boo", new Path(new Path(new URI(
"foo://bar/baz#bud")), new Path(new URI("fud#boo"))).toString());
// if the child uri is absolute path
assertEquals("foo://bar/fud#boo", new Path(new Path(new URI(
"foo://bar/baz#bud")), new Path(new URI("/fud#boo"))).toString());
}
/** Test URIs created from Path objects */
@Test (timeout = 30000)
public void testPathToUriConversion() throws URISyntaxException, IOException {
// Path differs from URI in that it ignores the query part..
assertEquals("? mark char in to URI",
new URI(null, null, "/foo?bar", null, null),
new Path("/foo?bar").toUri());
assertEquals("escape slashes chars in to URI",
new URI(null, null, "/foo\"bar", null, null),
new Path("/foo\"bar").toUri());
assertEquals("spaces in chars to URI",
new URI(null, null, "/foo bar", null, null),
new Path("/foo bar").toUri());
// therefore "foo?bar" is a valid Path, so a URI created from a Path
// has path "foo?bar" where in a straight URI the path part is just "foo"
assertEquals("/foo?bar",
new Path("http://localhost/foo?bar").toUri().getPath());
assertEquals("/foo", new URI("http://localhost/foo?bar").getPath());
// The path part handling in Path is equivalent to URI
assertEquals(new URI("/foo;bar").getPath(), new Path("/foo;bar").toUri().getPath());
assertEquals(new URI("/foo;bar"), new Path("/foo;bar").toUri());
assertEquals(new URI("/foo+bar"), new Path("/foo+bar").toUri());
assertEquals(new URI("/foo-bar"), new Path("/foo-bar").toUri());
assertEquals(new URI("/foo=bar"), new Path("/foo=bar").toUri());
assertEquals(new URI("/foo,bar"), new Path("/foo,bar").toUri());
}
/** Test reserved characters in URIs (and therefore Paths) */
@Test (timeout = 30000)
public void testReservedCharacters() throws URISyntaxException, IOException {
// URI encodes the path
assertEquals("/foo%20bar",
new URI(null, null, "/foo bar", null, null).getRawPath());
// URI#getPath decodes the path
assertEquals("/foo bar",
new URI(null, null, "/foo bar", null, null).getPath());
// URI#toString returns an encoded path
assertEquals("/foo%20bar",
new URI(null, null, "/foo bar", null, null).toString());
assertEquals("/foo%20bar", new Path("/foo bar").toUri().toString());
// Reserved chars are not encoded
assertEquals("/foo;bar", new URI("/foo;bar").getPath());
assertEquals("/foo;bar", new URI("/foo;bar").getRawPath());
assertEquals("/foo+bar", new URI("/foo+bar").getPath());
assertEquals("/foo+bar", new URI("/foo+bar").getRawPath());
// URI#getPath decodes the path part (and URL#getPath does not decode)
assertEquals("/foo bar",
new Path("http://localhost/foo bar").toUri().getPath());
assertEquals("/foo%20bar",
new Path("http://localhost/foo bar").toUri().toURL().getPath());
assertEquals("/foo?bar",
new URI("http", "localhost", "/foo?bar", null, null).getPath());
assertEquals("/foo%3Fbar",
new URI("http", "localhost", "/foo?bar", null, null).
toURL().getPath());
}
@Test (timeout = 30000)
public void testMakeQualified() throws URISyntaxException {
URI defaultUri = new URI("hdfs://host1/dir1");
URI wd = new URI("hdfs://host2/dir2");
// The scheme from defaultUri is used but the path part is not
assertEquals(new Path("hdfs://host1/dir/file"),
new Path("file").makeQualified(defaultUri, new Path("/dir")));
// The defaultUri is only used if the path + wd has no scheme
assertEquals(new Path("hdfs://host2/dir2/file"),
new Path("file").makeQualified(defaultUri, new Path(wd)));
}
@Test (timeout = 30000)
public void testGetName() {
assertEquals("", new Path("/").getName());
assertEquals("foo", new Path("foo").getName());
assertEquals("foo", new Path("/foo").getName());
assertEquals("foo", new Path("/foo/").getName());
assertEquals("bar", new Path("/foo/bar").getName());
assertEquals("bar", new Path("hdfs://host/foo/bar").getName());
}
@Test (timeout = 30000)
public void testAvroReflect() throws Exception {
AvroTestUtil.testReflect
(new Path("foo"),
"{\"type\":\"string\",\"java-class\":\"org.apache.hadoop.fs.Path\"}");
}
@Test (timeout = 30000)
public void testGlobEscapeStatus() throws Exception {
    // This test is not meaningful on Windows where * is disallowed in file names.
if (Shell.WINDOWS) return;
FileSystem lfs = FileSystem.getLocal(new Configuration());
Path testRoot = lfs.makeQualified(new Path(
System.getProperty("test.build.data","test/build/data"),
"testPathGlob"));
lfs.delete(testRoot, true);
lfs.mkdirs(testRoot);
assertTrue(lfs.isDirectory(testRoot));
lfs.setWorkingDirectory(testRoot);
// create a couple dirs with file in them
Path paths[] = new Path[]{
new Path(testRoot, "*/f"),
new Path(testRoot, "d1/f"),
new Path(testRoot, "d2/f")
};
Arrays.sort(paths);
for (Path p : paths) {
lfs.create(p).close();
assertTrue(lfs.exists(p));
}
// try the non-globbed listStatus
FileStatus stats[] = lfs.listStatus(new Path(testRoot, "*"));
assertEquals(1, stats.length);
assertEquals(new Path(testRoot, "*/f"), stats[0].getPath());
// ensure globStatus with "*" finds all dir contents
stats = lfs.globStatus(new Path(testRoot, "*"));
Arrays.sort(stats);
Path parentPaths[] = new Path[paths.length];
for (int i = 0; i < paths.length; i++) {
parentPaths[i] = paths[i].getParent();
}
assertEquals(mergeStatuses(parentPaths), mergeStatuses(stats));
// ensure that globStatus with an escaped "\*" only finds "*"
stats = lfs.globStatus(new Path(testRoot, "\\*"));
assertEquals(1, stats.length);
assertEquals(new Path(testRoot, "*"), stats[0].getPath());
// try to glob the inner file for all dirs
stats = lfs.globStatus(new Path(testRoot, "*/f"));
assertEquals(paths.length, stats.length);
assertEquals(mergeStatuses(paths), mergeStatuses(stats));
// try to get the inner file for only the "*" dir
stats = lfs.globStatus(new Path(testRoot, "\\*/f"));
assertEquals(1, stats.length);
assertEquals(new Path(testRoot, "*/f"), stats[0].getPath());
// try to glob all the contents of the "*" dir
stats = lfs.globStatus(new Path(testRoot, "\\*/*"));
assertEquals(1, stats.length);
assertEquals(new Path(testRoot, "*/f"), stats[0].getPath());
}
@Test (timeout = 30000)
public void testMergePaths() {
assertEquals(new Path("/foo/bar"),
Path.mergePaths(new Path("/foo"),
new Path("/bar")));
assertEquals(new Path("/foo/bar/baz"),
Path.mergePaths(new Path("/foo/bar"),
new Path("/baz")));
assertEquals(new Path("/foo/bar/baz"),
Path.mergePaths(new Path("/foo"),
new Path("/bar/baz")));
assertEquals(new Path(Shell.WINDOWS ? "/C:/foo/bar" : "/C:/foo/C:/bar"),
Path.mergePaths(new Path("/C:/foo"),
new Path("/C:/bar")));
assertEquals(new Path(Shell.WINDOWS ? "/C:/bar" : "/C:/C:/bar"),
Path.mergePaths(new Path("/C:/"),
new Path("/C:/bar")));
assertEquals(new Path("/bar"),
Path.mergePaths(new Path("/"), new Path("/bar")));
assertEquals(new Path("viewfs:///foo/bar"),
Path.mergePaths(new Path("viewfs:///foo"),
new Path("file:///bar")));
assertEquals(new Path("viewfs://vfsauthority/foo/bar"),
Path.mergePaths(new Path("viewfs://vfsauthority/foo"),
new Path("file://fileauthority/bar")));
}
@Test (timeout = 30000)
public void testIsWindowsAbsolutePath() {
if (!Shell.WINDOWS) return;
assertTrue(Path.isWindowsAbsolutePath("C:\\test", false));
assertTrue(Path.isWindowsAbsolutePath("C:/test", false));
assertTrue(Path.isWindowsAbsolutePath("/C:/test", true));
assertFalse(Path.isWindowsAbsolutePath("/test", false));
assertFalse(Path.isWindowsAbsolutePath("/test", true));
assertFalse(Path.isWindowsAbsolutePath("C:test", false));
assertFalse(Path.isWindowsAbsolutePath("/C:test", true));
}
}
| 19,632 | 37.800395 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/SymlinkBaseTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.*;
import java.net.URI;
import java.util.EnumSet;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.test.GenericTestUtils;
import static org.junit.Assert.*;
import static org.junit.Assume.assumeTrue;
import org.junit.Test;
import org.junit.Before;
import org.junit.After;
/**
* Base test for symbolic links
*/
public abstract class SymlinkBaseTest {
// Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
static {
FileSystem.enableSymlinks();
}
static final long seed = 0xDEADBEEFL;
static final int blockSize = 8192;
static final int fileSize = 16384;
static final int numBlocks = fileSize / blockSize;
protected static FSTestWrapper wrapper;
abstract protected String getScheme();
abstract protected String testBaseDir1() throws IOException;
abstract protected String testBaseDir2() throws IOException;
abstract protected URI testURI();
// Returns true if the filesystem is emulating symlink support. Certain
// checks will be bypassed if that is the case.
//
protected boolean emulatingSymlinksOnWindows() {
return false;
}
protected IOException unwrapException(IOException e) {
return e;
}
protected static void createAndWriteFile(Path p) throws IOException {
createAndWriteFile(wrapper, p);
}
protected static void createAndWriteFile(FSTestWrapper wrapper, Path p)
throws IOException {
wrapper.createFile(p, numBlocks, CreateOpts.createParent(),
CreateOpts.repFac((short) 1), CreateOpts.blockSize(blockSize));
}
protected static void readFile(Path p) throws IOException {
wrapper.readFile(p, fileSize);
}
protected static void appendToFile(Path p) throws IOException {
wrapper.appendToFile(p, numBlocks,
CreateOpts.blockSize(blockSize));
}
@Before
public void setUp() throws Exception {
wrapper.mkdir(new Path(testBaseDir1()), FileContext.DEFAULT_PERM, true);
wrapper.mkdir(new Path(testBaseDir2()), FileContext.DEFAULT_PERM, true);
}
@After
public void tearDown() throws Exception {
wrapper.delete(new Path(testBaseDir1()), true);
wrapper.delete(new Path(testBaseDir2()), true);
}
@Test(timeout=10000)
/** The root is not a symlink */
public void testStatRoot() throws IOException {
assertFalse(wrapper.getFileLinkStatus(new Path("/")).isSymlink());
}
@Test(timeout=10000)
  /** Test that setWorkingDirectory does not resolve symlinks */
public void testSetWDNotResolvesLinks() throws IOException {
Path dir = new Path(testBaseDir1());
Path linkToDir = new Path(testBaseDir1()+"/link");
wrapper.createSymlink(dir, linkToDir, false);
wrapper.setWorkingDirectory(linkToDir);
assertEquals(linkToDir.getName(), wrapper.getWorkingDirectory().getName());
}
@Test(timeout=10000)
/** Test create a dangling link */
public void testCreateDanglingLink() throws IOException {
Path file = new Path("/noSuchFile");
Path link = new Path(testBaseDir1()+"/link");
wrapper.createSymlink(file, link, false);
try {
wrapper.getFileStatus(link);
fail("Got file status of non-existant file");
} catch (FileNotFoundException f) {
// Expected
}
wrapper.delete(link, false);
}
@Test(timeout=10000)
/** Test create a link to null and empty path */
public void testCreateLinkToNullEmpty() throws IOException {
Path link = new Path(testBaseDir1()+"/link");
try {
wrapper.createSymlink(null, link, false);
fail("Can't create symlink to null");
} catch (java.lang.NullPointerException e) {
// Expected, create* with null yields NPEs
}
try {
wrapper.createSymlink(new Path(""), link, false);
fail("Can't create symlink to empty string");
} catch (java.lang.IllegalArgumentException e) {
// Expected, Path("") is invalid
}
}
@Test(timeout=10000)
/** Create a link with createParent set */
public void testCreateLinkCanCreateParent() throws IOException {
Path file = new Path(testBaseDir1()+"/file");
Path link = new Path(testBaseDir2()+"/linkToFile");
createAndWriteFile(file);
wrapper.delete(new Path(testBaseDir2()), true);
try {
wrapper.createSymlink(file, link, false);
fail("Created link without first creating parent dir");
} catch (IOException x) {
// Expected. Need to create testBaseDir2() first.
}
assertFalse(wrapper.exists(new Path(testBaseDir2())));
wrapper.createSymlink(file, link, true);
readFile(link);
}
@Test(timeout=10000)
/** Try to create a directory given a path that refers to a symlink */
public void testMkdirExistingLink() throws IOException {
Path file = new Path(testBaseDir1() + "/targetFile");
createAndWriteFile(file);
Path dir = new Path(testBaseDir1()+"/link");
wrapper.createSymlink(file, dir, false);
try {
wrapper.mkdir(dir, FileContext.DEFAULT_PERM, false);
fail("Created a dir where a symlink exists");
} catch (FileAlreadyExistsException e) {
// Expected. The symlink already exists.
} catch (IOException e) {
// LocalFs just throws an IOException
assertEquals("file", getScheme());
}
}
@Test(timeout=10000)
/** Try to create a file with parent that is a dangling link */
public void testCreateFileViaDanglingLinkParent() throws IOException {
Path dir = new Path(testBaseDir1()+"/dangling");
Path file = new Path(testBaseDir1()+"/dangling/file");
wrapper.createSymlink(new Path("/doesNotExist"), dir, false);
FSDataOutputStream out;
try {
out = wrapper.create(file, EnumSet.of(CreateFlag.CREATE),
CreateOpts.repFac((short) 1),
CreateOpts.blockSize(blockSize));
out.close();
fail("Created a link with dangling link parent");
} catch (FileNotFoundException e) {
// Expected. The parent is dangling.
}
}
@Test(timeout=10000)
/** Delete a link */
public void testDeleteLink() throws IOException {
Path file = new Path(testBaseDir1()+"/file");
Path link = new Path(testBaseDir1()+"/linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(file, link, false);
readFile(link);
wrapper.delete(link, false);
try {
readFile(link);
fail("Symlink should have been deleted");
} catch (IOException x) {
// Expected
}
// If we deleted the link we can put it back
wrapper.createSymlink(file, link, false);
}
@Test(timeout=10000)
/** Ensure open resolves symlinks */
public void testOpenResolvesLinks() throws IOException {
Path file = new Path(testBaseDir1()+"/noSuchFile");
Path link = new Path(testBaseDir1()+"/link");
wrapper.createSymlink(file, link, false);
try {
wrapper.open(link);
fail("link target does not exist");
} catch (FileNotFoundException x) {
// Expected
}
wrapper.delete(link, false);
}
@Test(timeout=10000)
/** Stat a link to a file */
public void testStatLinkToFile() throws IOException {
assumeTrue(!emulatingSymlinksOnWindows());
Path file = new Path(testBaseDir1()+"/file");
Path linkToFile = new Path(testBaseDir1()+"/linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(file, linkToFile, false);
assertFalse(wrapper.getFileLinkStatus(linkToFile).isDirectory());
assertTrue(wrapper.isSymlink(linkToFile));
assertTrue(wrapper.isFile(linkToFile));
assertFalse(wrapper.isDir(linkToFile));
assertEquals(file, wrapper.getLinkTarget(linkToFile));
// The local file system does not fully resolve the link
// when obtaining the file status
if (!"file".equals(getScheme())) {
assertEquals(wrapper.getFileStatus(file),
wrapper.getFileStatus(linkToFile));
assertEquals(wrapper.makeQualified(file),
wrapper.getFileStatus(linkToFile).getPath());
assertEquals(wrapper.makeQualified(linkToFile),
wrapper.getFileLinkStatus(linkToFile).getPath());
}
}
@Test(timeout=10000)
/** Stat a relative link to a file */
public void testStatRelLinkToFile() throws IOException {
assumeTrue(!"file".equals(getScheme()));
Path file = new Path(testBaseDir1(), "file");
Path linkToFile = new Path(testBaseDir1(), "linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(new Path("file"), linkToFile, false);
assertEquals(wrapper.getFileStatus(file),
wrapper.getFileStatus(linkToFile));
assertEquals(wrapper.makeQualified(file),
wrapper.getFileStatus(linkToFile).getPath());
assertEquals(wrapper.makeQualified(linkToFile),
wrapper.getFileLinkStatus(linkToFile).getPath());
}
@Test(timeout=10000)
/** Stat a link to a directory */
public void testStatLinkToDir() throws IOException {
Path dir = new Path(testBaseDir1());
Path linkToDir = new Path(testBaseDir1()+"/linkToDir");
wrapper.createSymlink(dir, linkToDir, false);
assertFalse(wrapper.getFileStatus(linkToDir).isSymlink());
assertTrue(wrapper.isDir(linkToDir));
assertFalse(wrapper.getFileLinkStatus(linkToDir).isDirectory());
assertTrue(wrapper.getFileLinkStatus(linkToDir).isSymlink());
assertFalse(wrapper.isFile(linkToDir));
assertTrue(wrapper.isDir(linkToDir));
assertEquals(dir, wrapper.getLinkTarget(linkToDir));
}
@Test(timeout=10000)
/** Stat a dangling link */
public void testStatDanglingLink() throws IOException {
Path file = new Path("/noSuchFile");
Path link = new Path(testBaseDir1()+"/link");
wrapper.createSymlink(file, link, false);
assertFalse(wrapper.getFileLinkStatus(link).isDirectory());
assertTrue(wrapper.getFileLinkStatus(link).isSymlink());
}
@Test(timeout=10000)
  /** Stat a non-existent file */
public void testStatNonExistentFiles() throws IOException {
Path fileAbs = new Path("/doesNotExist");
try {
wrapper.getFileLinkStatus(fileAbs);
fail("Got FileStatus for non-existant file");
} catch (FileNotFoundException f) {
// Expected
}
try {
wrapper.getLinkTarget(fileAbs);
fail("Got link target for non-existant file");
} catch (FileNotFoundException f) {
// Expected
}
}
@Test(timeout=10000)
/** Test stat'ing a regular file and directory */
public void testStatNonLinks() throws IOException {
Path dir = new Path(testBaseDir1());
Path file = new Path(testBaseDir1()+"/file");
createAndWriteFile(file);
try {
wrapper.getLinkTarget(dir);
fail("Lstat'd a non-symlink");
} catch (IOException e) {
// Expected.
}
try {
wrapper.getLinkTarget(file);
fail("Lstat'd a non-symlink");
} catch (IOException e) {
// Expected.
}
}
@Test(timeout=10000)
/** Test links that link to each other */
public void testRecursiveLinks() throws IOException {
Path link1 = new Path(testBaseDir1()+"/link1");
Path link2 = new Path(testBaseDir1()+"/link2");
wrapper.createSymlink(link1, link2, false);
wrapper.createSymlink(link2, link1, false);
try {
readFile(link1);
fail("Read recursive link");
} catch (FileNotFoundException f) {
      // LocalFs throws a subclass of IOException, since File.exists
      // returns false for a link to a link.
} catch (IOException x) {
assertEquals("Possible cyclic loop while following symbolic link "+
link1.toString(), x.getMessage());
}
}
/* Assert that the given link to a file behaves as expected. */
private void checkLink(Path linkAbs, Path expectedTarget, Path targetQual)
throws IOException {
// If we are emulating symlinks then many of these checks will fail
// so we skip them.
//
assumeTrue(!emulatingSymlinksOnWindows());
Path dir = new Path(testBaseDir1());
// isFile/Directory
assertTrue(wrapper.isFile(linkAbs));
assertFalse(wrapper.isDir(linkAbs));
// Check getFileStatus
assertFalse(wrapper.getFileStatus(linkAbs).isSymlink());
assertFalse(wrapper.getFileStatus(linkAbs).isDirectory());
assertEquals(fileSize, wrapper.getFileStatus(linkAbs).getLen());
// Check getFileLinkStatus
assertTrue(wrapper.isSymlink(linkAbs));
assertFalse(wrapper.getFileLinkStatus(linkAbs).isDirectory());
// Check getSymlink always returns a qualified target, except
// when partially qualified paths are used (see tests below).
assertEquals(targetQual.toString(),
wrapper.getFileLinkStatus(linkAbs).getSymlink().toString());
assertEquals(targetQual, wrapper.getFileLinkStatus(linkAbs).getSymlink());
// Check that the target is qualified using the file system of the
// path used to access the link (if the link target was not specified
// fully qualified, in that case we use the link target verbatim).
if (!"file".equals(getScheme())) {
FileContext localFc = FileContext.getLocalFSFileContext();
Path linkQual = new Path(testURI().toString(), linkAbs);
assertEquals(targetQual,
localFc.getFileLinkStatus(linkQual).getSymlink());
}
// Check getLinkTarget
assertEquals(expectedTarget, wrapper.getLinkTarget(linkAbs));
// Now read using all path types..
wrapper.setWorkingDirectory(dir);
readFile(new Path("linkToFile"));
readFile(linkAbs);
// And fully qualified.. (NB: for local fs this is partially qualified)
readFile(new Path(testURI().toString(), linkAbs));
// And partially qualified..
boolean failureExpected = true;
// local files are special cased, no authority
if ("file".equals(getScheme())) {
failureExpected = false;
}
// FileSystem automatically adds missing authority if scheme matches default
else if (wrapper instanceof FileSystemTestWrapper) {
failureExpected = false;
}
try {
readFile(new Path(getScheme()+":///"+testBaseDir1()+"/linkToFile"));
assertFalse(failureExpected);
} catch (Exception e) {
if (!failureExpected) {
throw new IOException(e);
}
}
// Now read using a different file context (for HDFS at least)
if (wrapper instanceof FileContextTestWrapper
&& !"file".equals(getScheme())) {
FSTestWrapper localWrapper = wrapper.getLocalFSWrapper();
localWrapper.readFile(new Path(testURI().toString(), linkAbs), fileSize);
}
}
@Test(timeout=10000)
/** Test creating a symlink using relative paths */
public void testCreateLinkUsingRelPaths() throws IOException {
Path fileAbs = new Path(testBaseDir1(), "file");
Path linkAbs = new Path(testBaseDir1(), "linkToFile");
Path schemeAuth = new Path(testURI().toString());
Path fileQual = new Path(schemeAuth, testBaseDir1()+"/file");
createAndWriteFile(fileAbs);
wrapper.setWorkingDirectory(new Path(testBaseDir1()));
wrapper.createSymlink(new Path("file"), new Path("linkToFile"), false);
checkLink(linkAbs, new Path("file"), fileQual);
// Now rename the link's parent. Because the target was specified
// with a relative path the link should still resolve.
Path dir1 = new Path(testBaseDir1());
Path dir2 = new Path(testBaseDir2());
Path linkViaDir2 = new Path(testBaseDir2(), "linkToFile");
Path fileViaDir2 = new Path(schemeAuth, testBaseDir2()+"/file");
wrapper.rename(dir1, dir2, Rename.OVERWRITE);
FileStatus[] stats = wrapper.listStatus(dir2);
assertEquals(fileViaDir2,
wrapper.getFileLinkStatus(linkViaDir2).getSymlink());
readFile(linkViaDir2);
}
@Test(timeout=10000)
/** Test creating a symlink using absolute paths */
public void testCreateLinkUsingAbsPaths() throws IOException {
Path fileAbs = new Path(testBaseDir1()+"/file");
Path linkAbs = new Path(testBaseDir1()+"/linkToFile");
Path schemeAuth = new Path(testURI().toString());
Path fileQual = new Path(schemeAuth, testBaseDir1()+"/file");
createAndWriteFile(fileAbs);
wrapper.createSymlink(fileAbs, linkAbs, false);
checkLink(linkAbs, fileAbs, fileQual);
// Now rename the link's parent. The target doesn't change and
// now no longer exists so accessing the link should fail.
Path dir1 = new Path(testBaseDir1());
Path dir2 = new Path(testBaseDir2());
Path linkViaDir2 = new Path(testBaseDir2(), "linkToFile");
wrapper.rename(dir1, dir2, Rename.OVERWRITE);
assertEquals(fileQual, wrapper.getFileLinkStatus(linkViaDir2).getSymlink());
try {
readFile(linkViaDir2);
fail("The target should not exist");
} catch (FileNotFoundException x) {
// Expected
}
}
@Test(timeout=10000)
/**
* Test creating a symlink using fully and partially qualified paths.
* NB: For local fs this actually tests partially qualified paths,
* as they don't support fully qualified paths.
*/
public void testCreateLinkUsingFullyQualPaths() throws IOException {
Path fileAbs = new Path(testBaseDir1(), "file");
Path linkAbs = new Path(testBaseDir1(), "linkToFile");
Path fileQual = new Path(testURI().toString(), fileAbs);
Path linkQual = new Path(testURI().toString(), linkAbs);
createAndWriteFile(fileAbs);
wrapper.createSymlink(fileQual, linkQual, false);
checkLink(linkAbs,
"file".equals(getScheme()) ? fileAbs : fileQual,
fileQual);
// Now rename the link's parent. The target doesn't change and
// now no longer exists so accessing the link should fail.
Path dir1 = new Path(testBaseDir1());
Path dir2 = new Path(testBaseDir2());
Path linkViaDir2 = new Path(testBaseDir2(), "linkToFile");
wrapper.rename(dir1, dir2, Rename.OVERWRITE);
assertEquals(fileQual, wrapper.getFileLinkStatus(linkViaDir2).getSymlink());
try {
readFile(linkViaDir2);
fail("The target should not exist");
} catch (FileNotFoundException x) {
// Expected
}
}
@Test(timeout=10000)
/**
   * Test creating a symlink using partially qualified paths, i.e. a scheme
* but no authority and vice versa. We just test link targets here since
* creating using a partially qualified path is file system specific.
*/
public void testCreateLinkUsingPartQualPath1() throws IOException {
// Partially qualified paths are covered for local file systems
// in the previous test.
assumeTrue(!"file".equals(getScheme()));
Path schemeAuth = new Path(testURI().toString());
Path fileWoHost = new Path(getScheme()+"://"+testBaseDir1()+"/file");
Path link = new Path(testBaseDir1()+"/linkToFile");
Path linkQual = new Path(schemeAuth, testBaseDir1()+"/linkToFile");
FSTestWrapper localWrapper = wrapper.getLocalFSWrapper();
wrapper.createSymlink(fileWoHost, link, false);
// Partially qualified path is stored
assertEquals(fileWoHost, wrapper.getLinkTarget(linkQual));
// NB: We do not add an authority
assertEquals(fileWoHost.toString(),
wrapper.getFileLinkStatus(link).getSymlink().toString());
assertEquals(fileWoHost.toString(),
wrapper.getFileLinkStatus(linkQual).getSymlink().toString());
// Ditto even from another file system
if (wrapper instanceof FileContextTestWrapper) {
assertEquals(fileWoHost.toString(),
localWrapper.getFileLinkStatus(linkQual).getSymlink().toString());
}
// Same as if we accessed a partially qualified path directly
try {
readFile(link);
fail("DFS requires URIs with schemes have an authority");
} catch (java.lang.RuntimeException e) {
assertTrue(wrapper instanceof FileContextTestWrapper);
// Expected
} catch (FileNotFoundException e) {
assertTrue(wrapper instanceof FileSystemTestWrapper);
GenericTestUtils.assertExceptionContains(
"File does not exist: /test1/file", e);
}
}
@Test(timeout=10000)
/** Same as above but vice versa (authority but no scheme) */
public void testCreateLinkUsingPartQualPath2() throws IOException {
Path link = new Path(testBaseDir1(), "linkToFile");
Path fileWoScheme = new Path("//"+testURI().getAuthority()+
testBaseDir1()+"/file");
if ("file".equals(getScheme())) {
return;
}
wrapper.createSymlink(fileWoScheme, link, false);
assertEquals(fileWoScheme, wrapper.getLinkTarget(link));
assertEquals(fileWoScheme.toString(),
wrapper.getFileLinkStatus(link).getSymlink().toString());
try {
readFile(link);
fail("Accessed a file with w/o scheme");
} catch (IOException e) {
// Expected
if (wrapper instanceof FileContextTestWrapper) {
GenericTestUtils.assertExceptionContains(
AbstractFileSystem.NO_ABSTRACT_FS_ERROR, e);
} else if (wrapper instanceof FileSystemTestWrapper) {
assertEquals("No FileSystem for scheme: null", e.getMessage());
}
}
}
@Test(timeout=10000)
/** Lstat and readlink on a normal file and directory */
public void testLinkStatusAndTargetWithNonLink() throws IOException {
Path schemeAuth = new Path(testURI().toString());
Path dir = new Path(testBaseDir1());
Path dirQual = new Path(schemeAuth, dir.toString());
Path file = new Path(testBaseDir1(), "file");
Path fileQual = new Path(schemeAuth, file.toString());
createAndWriteFile(file);
assertEquals(wrapper.getFileStatus(file), wrapper.getFileLinkStatus(file));
assertEquals(wrapper.getFileStatus(dir), wrapper.getFileLinkStatus(dir));
try {
wrapper.getLinkTarget(file);
fail("Get link target on non-link should throw an IOException");
} catch (IOException x) {
assertEquals("Path "+fileQual+" is not a symbolic link", x.getMessage());
}
try {
wrapper.getLinkTarget(dir);
fail("Get link target on non-link should throw an IOException");
} catch (IOException x) {
assertEquals("Path "+dirQual+" is not a symbolic link", x.getMessage());
}
}
@Test(timeout=10000)
/** Test create symlink to a directory */
public void testCreateLinkToDirectory() throws IOException {
Path dir1 = new Path(testBaseDir1());
Path file = new Path(testBaseDir1(), "file");
Path linkToDir = new Path(testBaseDir2(), "linkToDir");
createAndWriteFile(file);
wrapper.createSymlink(dir1, linkToDir, false);
assertFalse(wrapper.isFile(linkToDir));
assertTrue(wrapper.isDir(linkToDir));
assertTrue(wrapper.getFileStatus(linkToDir).isDirectory());
assertTrue(wrapper.getFileLinkStatus(linkToDir).isSymlink());
}
@Test(timeout=10000)
/** Test create and remove a file through a symlink */
public void testCreateFileViaSymlink() throws IOException {
Path dir = new Path(testBaseDir1());
Path linkToDir = new Path(testBaseDir2(), "linkToDir");
Path fileViaLink = new Path(linkToDir, "file");
wrapper.createSymlink(dir, linkToDir, false);
createAndWriteFile(fileViaLink);
assertTrue(wrapper.isFile(fileViaLink));
assertFalse(wrapper.isDir(fileViaLink));
assertFalse(wrapper.getFileLinkStatus(fileViaLink).isSymlink());
assertFalse(wrapper.getFileStatus(fileViaLink).isDirectory());
readFile(fileViaLink);
wrapper.delete(fileViaLink, true);
assertFalse(wrapper.exists(fileViaLink));
}
@Test(timeout=10000)
/** Test make and delete directory through a symlink */
public void testCreateDirViaSymlink() throws IOException {
Path dir1 = new Path(testBaseDir1());
Path subDir = new Path(testBaseDir1(), "subDir");
Path linkToDir = new Path(testBaseDir2(), "linkToDir");
Path subDirViaLink = new Path(linkToDir, "subDir");
wrapper.createSymlink(dir1, linkToDir, false);
wrapper.mkdir(subDirViaLink, FileContext.DEFAULT_PERM, true);
assertTrue(wrapper.isDir(subDirViaLink));
wrapper.delete(subDirViaLink, false);
assertFalse(wrapper.exists(subDirViaLink));
assertFalse(wrapper.exists(subDir));
}
@Test(timeout=10000)
/** Create symlink through a symlink */
public void testCreateLinkViaLink() throws IOException {
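    // Skipped where symlinks are only emulated on Windows; the link-specific
    // assertions below are assumed not to hold under that emulation.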
assumeTrue(!emulatingSymlinksOnWindows());
Path dir1 = new Path(testBaseDir1());
Path file = new Path(testBaseDir1(), "file");
Path linkToDir = new Path(testBaseDir2(), "linkToDir");
Path fileViaLink = new Path(linkToDir, "file");
Path linkToFile = new Path(linkToDir, "linkToFile");
/*
* /b2/linkToDir -> /b1
* /b2/linkToDir/linkToFile -> /b2/linkToDir/file
*/
createAndWriteFile(file);
wrapper.createSymlink(dir1, linkToDir, false);
wrapper.createSymlink(fileViaLink, linkToFile, false);
assertTrue(wrapper.isFile(linkToFile));
assertTrue(wrapper.getFileLinkStatus(linkToFile).isSymlink());
readFile(linkToFile);
assertEquals(fileSize, wrapper.getFileStatus(linkToFile).getLen());
assertEquals(fileViaLink, wrapper.getLinkTarget(linkToFile));
}
@Test(timeout=10000)
  /** Test listing the contents of a directory through a symlink */
public void testListStatusUsingLink() throws IOException {
Path file = new Path(testBaseDir1(), "file");
Path link = new Path(testBaseDir1(), "link");
createAndWriteFile(file);
wrapper.createSymlink(new Path(testBaseDir1()), link, false);
    // The size of the result is file-system dependent: HDFS returns 2 (file
    // and link) while LocalFs returns 3 (file, link, file crc).
FileStatus[] stats = wrapper.listStatus(link);
assertTrue(stats.length == 2 || stats.length == 3);
RemoteIterator<FileStatus> statsItor = wrapper.listStatusIterator(link);
int dirLen = 0;
while(statsItor.hasNext()) {
statsItor.next();
dirLen++;
}
assertTrue(dirLen == 2 || dirLen == 3);
}
@Test(timeout=10000)
  /** Test creating a symlink at a path where one already exists */
public void testCreateLinkTwice() throws IOException {
assumeTrue(!emulatingSymlinksOnWindows());
Path file = new Path(testBaseDir1(), "file");
Path link = new Path(testBaseDir1(), "linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(file, link, false);
try {
wrapper.createSymlink(file, link, false);
fail("link already exists");
} catch (IOException x) {
// Expected
}
}
@Test(timeout=10000)
/** Test access via a symlink to a symlink */
public void testCreateLinkToLink() throws IOException {
Path dir1 = new Path(testBaseDir1());
Path file = new Path(testBaseDir1(), "file");
Path linkToDir = new Path(testBaseDir2(), "linkToDir");
Path linkToLink = new Path(testBaseDir2(), "linkToLink");
Path fileViaLink = new Path(testBaseDir2(), "linkToLink/file");
createAndWriteFile(file);
wrapper.createSymlink(dir1, linkToDir, false);
wrapper.createSymlink(linkToDir, linkToLink, false);
assertTrue(wrapper.isFile(fileViaLink));
assertFalse(wrapper.isDir(fileViaLink));
assertFalse(wrapper.getFileLinkStatus(fileViaLink).isSymlink());
assertFalse(wrapper.getFileStatus(fileViaLink).isDirectory());
readFile(fileViaLink);
}
@Test(timeout=10000)
  /** Cannot create a file or directory at a path that refers to a symlink */
public void testCreateFileDirExistingLink() throws IOException {
Path file = new Path(testBaseDir1(), "file");
Path link = new Path(testBaseDir1(), "linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(file, link, false);
try {
createAndWriteFile(link);
fail("link already exists");
} catch (IOException x) {
// Expected
}
try {
wrapper.mkdir(link, FsPermission.getDefault(), false);
fail("link already exists");
} catch (IOException x) {
// Expected
}
}
@Test(timeout=10000)
/** Test deleting and recreating a symlink */
  public void testUseLinkAfterDeleteLink() throws IOException {
Path file = new Path(testBaseDir1(), "file");
Path link = new Path(testBaseDir1(), "linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(file, link, false);
wrapper.delete(link, false);
try {
readFile(link);
fail("link was deleted");
} catch (IOException x) {
// Expected
}
readFile(file);
wrapper.createSymlink(file, link, false);
readFile(link);
}
@Test(timeout=10000)
/** Test create symlink to . */
public void testCreateLinkToDot() throws IOException {
Path dir = new Path(testBaseDir1());
Path file = new Path(testBaseDir1(), "file");
Path link = new Path(testBaseDir1(), "linkToDot");
createAndWriteFile(file);
wrapper.setWorkingDirectory(dir);
try {
wrapper.createSymlink(new Path("."), link, false);
fail("Created symlink to dot");
} catch (IOException x) {
// Expected. Path(".") resolves to "" because URI normalizes
// the dot away and AbstractFileSystem considers "" invalid.
}
}
@Test(timeout=10000)
/** Test create symlink to .. */
public void testCreateLinkToDotDot() throws IOException {
Path file = new Path(testBaseDir1(), "test/file");
Path dotDot = new Path(testBaseDir1(), "test/..");
Path linkToDir = new Path(testBaseDir2(), "linkToDir");
Path fileViaLink = new Path(linkToDir, "test/file");
// Symlink to .. is not a problem since the .. is squashed early
assertEquals(new Path(testBaseDir1()), dotDot);
createAndWriteFile(file);
wrapper.createSymlink(dotDot, linkToDir, false);
readFile(fileViaLink);
assertEquals(fileSize, wrapper.getFileStatus(fileViaLink).getLen());
}
@Test(timeout=10000)
/** Test create symlink to ../file */
public void testCreateLinkToDotDotPrefix() throws IOException {
Path file = new Path(testBaseDir1(), "file");
Path dir = new Path(testBaseDir1(), "test");
Path link = new Path(testBaseDir1(), "test/link");
createAndWriteFile(file);
wrapper.mkdir(dir, FsPermission.getDefault(), false);
wrapper.setWorkingDirectory(dir);
wrapper.createSymlink(new Path("../file"), link, false);
readFile(link);
assertEquals(new Path("../file"), wrapper.getLinkTarget(link));
}
@Test(timeout=10000)
/** Test rename file using a path that contains a symlink. The rename should
* work as if the path did not contain a symlink */
public void testRenameFileViaSymlink() throws IOException {
Path dir = new Path(testBaseDir1());
Path file = new Path(testBaseDir1(), "file");
Path linkToDir = new Path(testBaseDir2(), "linkToDir");
Path fileViaLink = new Path(linkToDir, "file");
Path fileNewViaLink = new Path(linkToDir, "fileNew");
createAndWriteFile(file);
wrapper.createSymlink(dir, linkToDir, false);
wrapper.rename(fileViaLink, fileNewViaLink);
assertFalse(wrapper.exists(fileViaLink));
assertFalse(wrapper.exists(file));
assertTrue(wrapper.exists(fileNewViaLink));
}
@Test(timeout=10000)
/** Test rename a file through a symlink but this time only the
* destination path has an intermediate symlink. The rename should work
* as if the path did not contain a symlink */
public void testRenameFileToDestViaSymlink() throws IOException {
Path dir = new Path(testBaseDir1());
Path file = new Path(testBaseDir1(), "file");
Path linkToDir = new Path(testBaseDir2(), "linkToDir");
Path subDir = new Path(linkToDir, "subDir");
createAndWriteFile(file);
wrapper.createSymlink(dir, linkToDir, false);
wrapper.mkdir(subDir, FileContext.DEFAULT_PERM, false);
try {
wrapper.rename(file, subDir);
fail("Renamed file to a directory");
} catch (IOException e) {
// Expected. Both must be directories.
assertTrue(unwrapException(e) instanceof IOException);
}
assertTrue(wrapper.exists(file));
}
@Test(timeout=10000)
  /** Similar to the previous tests but renames a directory */
public void testRenameDirViaSymlink() throws IOException {
Path baseDir = new Path(testBaseDir1());
Path dir = new Path(baseDir, "dir");
Path linkToDir = new Path(testBaseDir2(), "linkToDir");
Path dirViaLink = new Path(linkToDir, "dir");
Path dirNewViaLink = new Path(linkToDir, "dirNew");
wrapper.mkdir(dir, FileContext.DEFAULT_PERM, false);
wrapper.createSymlink(baseDir, linkToDir, false);
assertTrue(wrapper.exists(dirViaLink));
wrapper.rename(dirViaLink, dirNewViaLink);
assertFalse(wrapper.exists(dirViaLink));
assertFalse(wrapper.exists(dir));
assertTrue(wrapper.exists(dirNewViaLink));
}
@Test(timeout=10000)
  /** Similar to the previous tests but renames a symlink */
public void testRenameSymlinkViaSymlink() throws IOException {
Path baseDir = new Path(testBaseDir1());
Path file = new Path(testBaseDir1(), "file");
Path link = new Path(testBaseDir1(), "link");
Path linkToDir = new Path(testBaseDir2(), "linkToDir");
Path linkViaLink = new Path(linkToDir, "link");
Path linkNewViaLink = new Path(linkToDir, "linkNew");
createAndWriteFile(file);
wrapper.createSymlink(file, link, false);
wrapper.createSymlink(baseDir, linkToDir, false);
wrapper.rename(linkViaLink, linkNewViaLink);
assertFalse(wrapper.exists(linkViaLink));
// Check that we didn't rename the link target
assertTrue(wrapper.exists(file));
assertTrue(wrapper.getFileLinkStatus(linkNewViaLink).isSymlink() ||
emulatingSymlinksOnWindows());
readFile(linkNewViaLink);
}
@Test(timeout=10000)
/** Test rename a directory to a symlink to a directory */
public void testRenameDirToSymlinkToDir() throws IOException {
Path dir1 = new Path(testBaseDir1());
Path subDir = new Path(testBaseDir2(), "subDir");
Path linkToDir = new Path(testBaseDir2(), "linkToDir");
wrapper.mkdir(subDir, FileContext.DEFAULT_PERM, false);
wrapper.createSymlink(subDir, linkToDir, false);
try {
wrapper.rename(dir1, linkToDir, Rename.OVERWRITE);
fail("Renamed directory to a symlink");
} catch (IOException e) {
// Expected. Both must be directories.
assertTrue(unwrapException(e) instanceof IOException);
}
assertTrue(wrapper.exists(dir1));
assertTrue(wrapper.exists(linkToDir));
}
@Test(timeout=10000)
/** Test rename a directory to a symlink to a file */
public void testRenameDirToSymlinkToFile() throws IOException {
Path dir1 = new Path(testBaseDir1());
Path file = new Path(testBaseDir2(), "file");
Path linkToFile = new Path(testBaseDir2(), "linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(file, linkToFile, false);
try {
wrapper.rename(dir1, linkToFile, Rename.OVERWRITE);
fail("Renamed directory to a symlink");
} catch (IOException e) {
// Expected. Both must be directories.
assertTrue(unwrapException(e) instanceof IOException);
}
assertTrue(wrapper.exists(dir1));
assertTrue(wrapper.exists(linkToFile));
}
@Test(timeout=10000)
/** Test rename a directory to a dangling symlink */
public void testRenameDirToDanglingSymlink() throws IOException {
Path dir = new Path(testBaseDir1());
Path link = new Path(testBaseDir2(), "linkToFile");
wrapper.createSymlink(new Path("/doesNotExist"), link, false);
try {
wrapper.rename(dir, link, Rename.OVERWRITE);
fail("Renamed directory to a symlink");
} catch (IOException e) {
// Expected. Both must be directories.
assertTrue(unwrapException(e) instanceof IOException);
}
assertTrue(wrapper.exists(dir));
assertTrue(wrapper.getFileLinkStatus(link) != null);
}
@Test(timeout=10000)
/** Test rename a file to a symlink to a directory */
public void testRenameFileToSymlinkToDir() throws IOException {
Path file = new Path(testBaseDir1(), "file");
Path subDir = new Path(testBaseDir1(), "subDir");
Path link = new Path(testBaseDir1(), "link");
wrapper.mkdir(subDir, FileContext.DEFAULT_PERM, false);
wrapper.createSymlink(subDir, link, false);
createAndWriteFile(file);
try {
wrapper.rename(file, link);
fail("Renamed file to symlink w/o overwrite");
} catch (IOException e) {
// Expected
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
wrapper.rename(file, link, Rename.OVERWRITE);
assertFalse(wrapper.exists(file));
assertTrue(wrapper.exists(link));
assertTrue(wrapper.isFile(link));
assertFalse(wrapper.getFileLinkStatus(link).isSymlink());
}
@Test(timeout=10000)
/** Test rename a file to a symlink to a file */
public void testRenameFileToSymlinkToFile() throws IOException {
Path file1 = new Path(testBaseDir1(), "file1");
Path file2 = new Path(testBaseDir1(), "file2");
Path link = new Path(testBaseDir1(), "linkToFile");
createAndWriteFile(file1);
createAndWriteFile(file2);
wrapper.createSymlink(file2, link, false);
try {
wrapper.rename(file1, link);
fail("Renamed file to symlink w/o overwrite");
} catch (IOException e) {
// Expected
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
wrapper.rename(file1, link, Rename.OVERWRITE);
assertFalse(wrapper.exists(file1));
assertTrue(wrapper.exists(link));
assertTrue(wrapper.isFile(link));
assertFalse(wrapper.getFileLinkStatus(link).isSymlink());
}
@Test(timeout=10000)
/** Test rename a file to a dangling symlink */
public void testRenameFileToDanglingSymlink() throws IOException {
/* NB: Local file system doesn't handle dangling links correctly
     * since File.exists(danglingLink) returns false. */
if ("file".equals(getScheme())) {
return;
}
Path file1 = new Path(testBaseDir1(), "file1");
Path link = new Path(testBaseDir1(), "linkToFile");
createAndWriteFile(file1);
wrapper.createSymlink(new Path("/doesNotExist"), link, false);
try {
wrapper.rename(file1, link);
} catch (IOException e) {
// Expected
}
wrapper.rename(file1, link, Rename.OVERWRITE);
assertFalse(wrapper.exists(file1));
assertTrue(wrapper.exists(link));
assertTrue(wrapper.isFile(link));
assertFalse(wrapper.getFileLinkStatus(link).isSymlink());
}
@Test(timeout=10000)
  /** Rename a symlink to a new non-existent name */
  public void testRenameSymlinkNonExistentDest() throws IOException {
Path file = new Path(testBaseDir1(), "file");
Path link1 = new Path(testBaseDir1(), "linkToFile1");
Path link2 = new Path(testBaseDir1(), "linkToFile2");
createAndWriteFile(file);
wrapper.createSymlink(file, link1, false);
wrapper.rename(link1, link2);
assertTrue(wrapper.getFileLinkStatus(link2).isSymlink() ||
emulatingSymlinksOnWindows());
readFile(link2);
readFile(file);
assertFalse(wrapper.exists(link1));
}
@Test(timeout=10000)
/** Rename a symlink to a file that exists */
public void testRenameSymlinkToExistingFile() throws IOException {
Path file1 = new Path(testBaseDir1(), "file");
Path file2 = new Path(testBaseDir1(), "someFile");
Path link = new Path(testBaseDir1(), "linkToFile");
createAndWriteFile(file1);
createAndWriteFile(file2);
wrapper.createSymlink(file2, link, false);
try {
wrapper.rename(link, file1);
fail("Renamed w/o passing overwrite");
} catch (IOException e) {
// Expected
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
wrapper.rename(link, file1, Rename.OVERWRITE);
assertFalse(wrapper.exists(link));
if (!emulatingSymlinksOnWindows()) {
assertTrue(wrapper.getFileLinkStatus(file1).isSymlink());
assertEquals(file2, wrapper.getLinkTarget(file1));
}
}
@Test(timeout=10000)
/** Rename a symlink to a directory that exists */
public void testRenameSymlinkToExistingDir() throws IOException {
Path dir1 = new Path(testBaseDir1());
Path dir2 = new Path(testBaseDir2());
Path subDir = new Path(testBaseDir2(), "subDir");
Path link = new Path(testBaseDir1(), "linkToDir");
wrapper.createSymlink(dir1, link, false);
try {
wrapper.rename(link, dir2);
fail("Renamed link to a directory");
} catch (IOException e) {
// Expected. Both must be directories.
assertTrue(unwrapException(e) instanceof IOException);
}
try {
wrapper.rename(link, dir2, Rename.OVERWRITE);
fail("Renamed link to a directory");
} catch (IOException e) {
// Expected. Both must be directories.
assertTrue(unwrapException(e) instanceof IOException);
}
// Also fails when dir2 has a sub-directory
wrapper.mkdir(subDir, FsPermission.getDefault(), false);
try {
wrapper.rename(link, dir2, Rename.OVERWRITE);
fail("Renamed link to a directory");
} catch (IOException e) {
// Expected. Both must be directories.
assertTrue(unwrapException(e) instanceof IOException);
}
}
@Test(timeout=10000)
/** Rename a symlink to itself */
public void testRenameSymlinkToItself() throws IOException {
Path file = new Path(testBaseDir1(), "file");
createAndWriteFile(file);
Path link = new Path(testBaseDir1(), "linkToFile1");
wrapper.createSymlink(file, link, false);
try {
wrapper.rename(link, link);
fail("Failed to get expected IOException");
} catch (IOException e) {
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Fails with overwrite as well
try {
wrapper.rename(link, link, Rename.OVERWRITE);
fail("Failed to get expected IOException");
} catch (IOException e) {
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
}
@Test(timeout=10000)
/** Rename a symlink */
public void testRenameSymlink() throws IOException {
assumeTrue(!emulatingSymlinksOnWindows());
Path file = new Path(testBaseDir1(), "file");
Path link1 = new Path(testBaseDir1(), "linkToFile1");
Path link2 = new Path(testBaseDir1(), "linkToFile2");
createAndWriteFile(file);
wrapper.createSymlink(file, link1, false);
wrapper.rename(link1, link2);
assertTrue(wrapper.getFileLinkStatus(link2).isSymlink());
assertFalse(wrapper.getFileStatus(link2).isDirectory());
readFile(link2);
readFile(file);
try {
createAndWriteFile(link2);
fail("link was not renamed");
} catch (IOException x) {
// Expected
}
}
@Test(timeout=10000)
/** Rename a symlink to the file it links to */
public void testRenameSymlinkToFileItLinksTo() throws IOException {
/* NB: The rename is not atomic, so file is deleted before renaming
* linkToFile. In this interval linkToFile is dangling and local file
* system does not handle dangling links because File.exists returns
* false for dangling links. */
if ("file".equals(getScheme())) {
return;
}
Path file = new Path(testBaseDir1(), "file");
Path link = new Path(testBaseDir1(), "linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(file, link, false);
try {
wrapper.rename(link, file);
fail("Renamed symlink to its target");
} catch (IOException e) {
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Check the rename didn't happen
assertTrue(wrapper.isFile(file));
assertTrue(wrapper.exists(link));
assertTrue(wrapper.isSymlink(link));
assertEquals(file, wrapper.getLinkTarget(link));
try {
wrapper.rename(link, file, Rename.OVERWRITE);
fail("Renamed symlink to its target");
} catch (IOException e) {
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Check the rename didn't happen
assertTrue(wrapper.isFile(file));
assertTrue(wrapper.exists(link));
assertTrue(wrapper.isSymlink(link));
assertEquals(file, wrapper.getLinkTarget(link));
}
@Test(timeout=10000)
/** Rename a symlink to the directory it links to */
public void testRenameSymlinkToDirItLinksTo() throws IOException {
    /* NB: The rename is not atomic, so dir is deleted before renaming
     * linkToDir. In this interval linkToDir is dangling and the local file
     * system does not handle dangling links because File.exists returns
     * false for dangling links. */
if ("file".equals(getScheme())) {
return;
}
Path dir = new Path(testBaseDir1(), "dir");
Path link = new Path(testBaseDir1(), "linkToDir");
wrapper.mkdir(dir, FileContext.DEFAULT_PERM, false);
wrapper.createSymlink(dir, link, false);
try {
wrapper.rename(link, dir);
fail("Renamed symlink to its target");
} catch (IOException e) {
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Check the rename didn't happen
assertTrue(wrapper.isDir(dir));
assertTrue(wrapper.exists(link));
assertTrue(wrapper.isSymlink(link));
assertEquals(dir, wrapper.getLinkTarget(link));
try {
wrapper.rename(link, dir, Rename.OVERWRITE);
fail("Renamed symlink to its target");
} catch (IOException e) {
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Check the rename didn't happen
assertTrue(wrapper.isDir(dir));
assertTrue(wrapper.exists(link));
assertTrue(wrapper.isSymlink(link));
assertEquals(dir, wrapper.getLinkTarget(link));
}
@Test(timeout=10000)
/** Test rename the symlink's target */
public void testRenameLinkTarget() throws IOException {
assumeTrue(!emulatingSymlinksOnWindows());
Path file = new Path(testBaseDir1(), "file");
Path fileNew = new Path(testBaseDir1(), "fileNew");
Path link = new Path(testBaseDir1(), "linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(file, link, false);
wrapper.rename(file, fileNew, Rename.OVERWRITE);
try {
readFile(link);
fail("Link should be dangling");
} catch (IOException x) {
// Expected
}
wrapper.rename(fileNew, file, Rename.OVERWRITE);
readFile(link);
}
@Test(timeout=10000)
  /** Test renaming a file to a destination path whose parent is a symlink */
public void testRenameFileWithDestParentSymlink() throws IOException {
Path link = new Path(testBaseDir1(), "link");
Path file1 = new Path(testBaseDir1(), "file1");
Path file2 = new Path(testBaseDir1(), "file2");
Path file3 = new Path(link, "file3");
Path dir2 = new Path(testBaseDir2());
    // Renaming /dir1/file1 to non-existent file /dir1/link/file3 is OK
// if link points to a directory...
wrapper.createSymlink(dir2, link, false);
createAndWriteFile(file1);
wrapper.rename(file1, file3);
assertFalse(wrapper.exists(file1));
assertTrue(wrapper.exists(file3));
wrapper.rename(file3, file1);
// But fails if link is dangling...
wrapper.delete(link, false);
wrapper.createSymlink(file2, link, false);
try {
wrapper.rename(file1, file3);
} catch (IOException e) {
// Expected
assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
// And if link points to a file...
createAndWriteFile(file2);
try {
wrapper.rename(file1, file3);
} catch (IOException e) {
// Expected
assertTrue(unwrapException(e) instanceof ParentNotDirectoryException);
}
}
@Test(timeout=10000)
/**
* Create, write, read, append, rename, get the block locations,
* checksums, and delete a file using a path with a symlink as an
* intermediate path component where the link target was specified
   * using an absolute path. Rename is covered in more depth above.
*/
public void testAccessFileViaInterSymlinkAbsTarget() throws IOException {
Path baseDir = new Path(testBaseDir1());
Path file = new Path(testBaseDir1(), "file");
Path fileNew = new Path(baseDir, "fileNew");
Path linkToDir = new Path(testBaseDir2(), "linkToDir");
Path fileViaLink = new Path(linkToDir, "file");
Path fileNewViaLink = new Path(linkToDir, "fileNew");
wrapper.createSymlink(baseDir, linkToDir, false);
createAndWriteFile(fileViaLink);
assertTrue(wrapper.exists(fileViaLink));
assertTrue(wrapper.isFile(fileViaLink));
assertFalse(wrapper.isDir(fileViaLink));
assertFalse(wrapper.getFileLinkStatus(fileViaLink).isSymlink());
assertEquals(wrapper.getFileStatus(file),
wrapper.getFileLinkStatus(file));
assertEquals(wrapper.getFileStatus(fileViaLink),
wrapper.getFileLinkStatus(fileViaLink));
readFile(fileViaLink);
appendToFile(fileViaLink);
wrapper.rename(fileViaLink, fileNewViaLink);
assertFalse(wrapper.exists(fileViaLink));
assertTrue(wrapper.exists(fileNewViaLink));
readFile(fileNewViaLink);
assertEquals(wrapper.getFileBlockLocations(fileNew, 0, 1).length,
wrapper.getFileBlockLocations(fileNewViaLink, 0, 1).length);
assertEquals(wrapper.getFileChecksum(fileNew),
wrapper.getFileChecksum(fileNewViaLink));
wrapper.delete(fileNewViaLink, true);
assertFalse(wrapper.exists(fileNewViaLink));
}
@Test(timeout=10000)
/**
* Operate on a file using a path with an intermediate symlink where
* the link target was specified as a fully qualified path.
*/
public void testAccessFileViaInterSymlinkQualTarget() throws IOException {
Path baseDir = new Path(testBaseDir1());
Path file = new Path(testBaseDir1(), "file");
Path linkToDir = new Path(testBaseDir2(), "linkToDir");
Path fileViaLink = new Path(linkToDir, "file");
wrapper.createSymlink(wrapper.makeQualified(baseDir), linkToDir, false);
createAndWriteFile(fileViaLink);
assertEquals(wrapper.getFileStatus(file),
wrapper.getFileLinkStatus(file));
assertEquals(wrapper.getFileStatus(fileViaLink),
wrapper.getFileLinkStatus(fileViaLink));
readFile(fileViaLink);
}
@Test(timeout=10000)
/**
* Operate on a file using a path with an intermediate symlink where
* the link target was specified as a relative path.
*/
public void testAccessFileViaInterSymlinkRelTarget() throws IOException {
assumeTrue(!"file".equals(getScheme()));
Path dir = new Path(testBaseDir1(), "dir");
Path file = new Path(dir, "file");
Path linkToDir = new Path(testBaseDir1(), "linkToDir");
Path fileViaLink = new Path(linkToDir, "file");
wrapper.mkdir(dir, FileContext.DEFAULT_PERM, false);
wrapper.createSymlink(new Path("dir"), linkToDir, false);
createAndWriteFile(fileViaLink);
// Note that getFileStatus returns fully qualified paths even
// when called on an absolute path.
assertEquals(wrapper.makeQualified(file),
wrapper.getFileStatus(file).getPath());
// In each case getFileLinkStatus returns the same FileStatus
// as getFileStatus since we're not calling it on a link and
// FileStatus objects are compared by Path.
assertEquals(wrapper.getFileStatus(file),
wrapper.getFileLinkStatus(file));
assertEquals(wrapper.getFileStatus(fileViaLink),
wrapper.getFileLinkStatus(fileViaLink));
assertEquals(wrapper.getFileStatus(fileViaLink),
wrapper.getFileLinkStatus(file));
}
@Test(timeout=10000)
/** Test create, list, and delete a directory through a symlink */
public void testAccessDirViaSymlink() throws IOException {
Path baseDir = new Path(testBaseDir1());
Path dir = new Path(testBaseDir1(), "dir");
Path linkToDir = new Path(testBaseDir2(), "linkToDir");
Path dirViaLink = new Path(linkToDir, "dir");
wrapper.createSymlink(baseDir, linkToDir, false);
wrapper.mkdir(dirViaLink, FileContext.DEFAULT_PERM, true);
assertTrue(wrapper.getFileStatus(dirViaLink).isDirectory());
FileStatus[] stats = wrapper.listStatus(dirViaLink);
assertEquals(0, stats.length);
RemoteIterator<FileStatus> statsItor = wrapper.listStatusIterator(dirViaLink);
assertFalse(statsItor.hasNext());
wrapper.delete(dirViaLink, false);
assertFalse(wrapper.exists(dirViaLink));
assertFalse(wrapper.exists(dir));
}
@Test(timeout=10000)
  /** setTimes affects the target file, not the link */
public void testSetTimesSymlinkToFile() throws IOException {
Path file = new Path(testBaseDir1(), "file");
Path link = new Path(testBaseDir1(), "linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(file, link, false);
long at = wrapper.getFileLinkStatus(link).getAccessTime();
    // the local file system may not support millisecond timestamps, hence
    // the whole-second values (2000 ms, 3000 ms) used below
wrapper.setTimes(link, 2000L, 3000L);
assertEquals(at, wrapper.getFileLinkStatus(link).getAccessTime());
assertEquals(2000, wrapper.getFileStatus(file).getModificationTime());
assertEquals(3000, wrapper.getFileStatus(file).getAccessTime());
}
@Test(timeout=10000)
  /** setTimes affects the target directory, not the link */
public void testSetTimesSymlinkToDir() throws IOException {
Path dir = new Path(testBaseDir1(), "dir");
Path link = new Path(testBaseDir1(), "linkToDir");
wrapper.mkdir(dir, FileContext.DEFAULT_PERM, false);
wrapper.createSymlink(dir, link, false);
long at = wrapper.getFileLinkStatus(link).getAccessTime();
    // the local file system may not support millisecond timestamps, hence
    // the whole-second values (2000 ms, 3000 ms) used below
wrapper.setTimes(link, 2000L, 3000L);
assertEquals(at, wrapper.getFileLinkStatus(link).getAccessTime());
assertEquals(2000, wrapper.getFileStatus(dir).getModificationTime());
assertEquals(3000, wrapper.getFileStatus(dir).getAccessTime());
}
@Test(timeout=10000)
  /** setTimes on a dangling link fails and leaves the link itself unchanged */
public void testSetTimesDanglingLink() throws IOException {
Path file = new Path("/noSuchFile");
Path link = new Path(testBaseDir1()+"/link");
wrapper.createSymlink(file, link, false);
long at = wrapper.getFileLinkStatus(link).getAccessTime();
try {
wrapper.setTimes(link, 2000L, 3000L);
fail("set times to non-existant file");
} catch (IOException e) {
// Expected
}
assertEquals(at, wrapper.getFileLinkStatus(link).getAccessTime());
}
}
| 55,577 | 37.757322 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.FsPermission;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mortbay.log.Log;
/**
* <p>
* A collection of tests for the {@link FileSystem}.
* This test should be used for testing an instance of FileSystem
* that has been initialized to a specific default FileSystem such a
* LocalFileSystem, HDFS,S3, etc.
* </p>
* <p>
 * To test a given {@link FileSystem} implementation create a subclass of this
 * test and override {@link #createFileSystem()} to return the file system
 * under test; {@link #setUp()} assigns it to the <code>fSys</code>
 * {@link FileSystem} instance variable.
 *
 * Since this is a JUnit 4 test you can also do a single setup before
 * the start of any tests.
* E.g.
 * @BeforeClass public static void clusterSetupAtBeginning()
* @AfterClass public static void ClusterShutdownAtEnd()
* </p>
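 * <p>
 * A minimal sketch of such a subclass (hypothetical class name; assuming the
 * local file system is the implementation under test):
 * <pre>
 * public class TestLocalFSMainOperations extends FSMainOperationsBaseTest {
 *   {@literal @}Override
 *   protected FileSystem createFileSystem() throws Exception {
 *     return FileSystem.getLocal(new Configuration());
 *   }
 * }
 * </pre>
 * </p>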
*/
public abstract class FSMainOperationsBaseTest extends FileSystemTestHelper {
private static String TEST_DIR_AAA2 = "test/hadoop2/aaa";
private static String TEST_DIR_AAA = "test/hadoop/aaa";
private static String TEST_DIR_AXA = "test/hadoop/axa";
private static String TEST_DIR_AXX = "test/hadoop/axx";
private static int numBlocks = 2;
protected FileSystem fSys;
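  // A trivial filter that accepts every path; used to exercise the filtered
  // globStatus overloads without restricting the results.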
  private static final PathFilter DEFAULT_FILTER = new PathFilter() {
@Override
public boolean accept(final Path file) {
return true;
}
};
  // A test filter that accepts any path containing an "x" or "X"
  private static final PathFilter TEST_X_FILTER = new PathFilter() {
@Override
public boolean accept(Path file) {
if(file.getName().contains("x") || file.getName().contains("X"))
return true;
else
return false;
}
};
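  // Shared payload for the write/read tests: numBlocks blocks' worth of bytes.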
protected static final byte[] data = getFileData(numBlocks,
getDefaultBlockSize());
abstract protected FileSystem createFileSystem() throws Exception;
public FSMainOperationsBaseTest() {
}
public FSMainOperationsBaseTest(String testRootDir) {
super(testRootDir);
}
@Before
public void setUp() throws Exception {
fSys = createFileSystem();
fSys.mkdirs(getTestRootPath(fSys, "test"));
}
@After
public void tearDown() throws Exception {
fSys.delete(new Path(getAbsoluteTestRootPath(fSys), new Path("test")), true);
}
protected Path getDefaultWorkingDirectory() throws IOException {
return getTestRootPath(fSys,
"/user/" + System.getProperty("user.name")).makeQualified(
fSys.getUri(), fSys.getWorkingDirectory());
}
protected boolean renameSupported() {
return true;
}
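  /**
   * Hook for subclasses whose FileSystem wraps thrown exceptions (for
   * example inside a RemoteException); the default assumes the exception
   * is already the root cause.
   */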
protected IOException unwrapException(IOException e) {
return e;
}
@Test
public void testFsStatus() throws Exception {
FsStatus fsStatus = fSys.getStatus(null);
Assert.assertNotNull(fsStatus);
//used, free and capacity are non-negative longs
Assert.assertTrue(fsStatus.getUsed() >= 0);
Assert.assertTrue(fsStatus.getRemaining() >= 0);
Assert.assertTrue(fsStatus.getCapacity() >= 0);
}
@Test
public void testWorkingDirectory() throws Exception {
// First we cd to our test root
Path workDir = new Path(getAbsoluteTestRootPath(fSys), new Path("test"));
fSys.setWorkingDirectory(workDir);
Assert.assertEquals(workDir, fSys.getWorkingDirectory());
fSys.setWorkingDirectory(new Path("."));
Assert.assertEquals(workDir, fSys.getWorkingDirectory());
fSys.setWorkingDirectory(new Path(".."));
Assert.assertEquals(workDir.getParent(), fSys.getWorkingDirectory());
// cd using a relative path
// Go back to our test root
workDir = new Path(getAbsoluteTestRootPath(fSys), new Path("test"));
fSys.setWorkingDirectory(workDir);
Assert.assertEquals(workDir, fSys.getWorkingDirectory());
Path relativeDir = new Path("existingDir1");
Path absoluteDir = new Path(workDir,"existingDir1");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(relativeDir);
Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
    // cd using an absolute path
absoluteDir = getTestRootPath(fSys, "test/existingDir2");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
// Now open a file relative to the wd we just set above.
Path absolutePath = new Path(absoluteDir, "foo");
createFile(fSys, absolutePath);
fSys.open(new Path("foo")).close();
// Now mkdir relative to the dir we cd'ed to
fSys.mkdirs(new Path("newDir"));
Assert.assertTrue(isDir(fSys, new Path(absoluteDir, "newDir")));
/**
* We cannot test this because FileSystem has never checked for
     * existence of the working dir; fixing this would break compatibility.
*
absoluteDir = getTestRootPath(fSys, "nonexistingPath");
try {
fSys.setWorkingDirectory(absoluteDir);
Assert.fail("cd to non existing dir should have failed");
} catch (Exception e) {
// Exception as expected
}
*/
}
// Try a URI
@Test
public void testWDAbsolute() throws IOException {
Path absoluteDir = new Path(fSys.getUri() + "/test/existingDir");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
}
@Test
public void testMkdirs() throws Exception {
Path testDir = getTestRootPath(fSys, "test/hadoop");
Assert.assertFalse(exists(fSys, testDir));
Assert.assertFalse(isFile(fSys, testDir));
fSys.mkdirs(testDir);
Assert.assertTrue(exists(fSys, testDir));
Assert.assertFalse(isFile(fSys, testDir));
fSys.mkdirs(testDir);
Assert.assertTrue(exists(fSys, testDir));
Assert.assertFalse(isFile(fSys, testDir));
Path parentDir = testDir.getParent();
Assert.assertTrue(exists(fSys, parentDir));
Assert.assertFalse(isFile(fSys, parentDir));
Path grandparentDir = parentDir.getParent();
Assert.assertTrue(exists(fSys, grandparentDir));
Assert.assertFalse(isFile(fSys, grandparentDir));
}
@Test
public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
Path testDir = getTestRootPath(fSys, "test/hadoop");
Assert.assertFalse(exists(fSys, testDir));
fSys.mkdirs(testDir);
Assert.assertTrue(exists(fSys, testDir));
createFile(getTestRootPath(fSys, "test/hadoop/file"));
Path testSubDir = getTestRootPath(fSys, "test/hadoop/file/subdir");
try {
fSys.mkdirs(testSubDir);
Assert.fail("Should throw IOException.");
} catch (IOException e) {
// expected
}
Assert.assertFalse(exists(fSys, testSubDir));
Path testDeepSubDir = getTestRootPath(fSys, "test/hadoop/file/deep/sub/dir");
try {
fSys.mkdirs(testDeepSubDir);
Assert.fail("Should throw IOException.");
} catch (IOException e) {
// expected
}
Assert.assertFalse(exists(fSys, testDeepSubDir));
}
@Test
public void testGetFileStatusThrowsExceptionForNonExistentFile()
throws Exception {
try {
fSys.getFileStatus(getTestRootPath(fSys, "test/hadoop/file"));
Assert.fail("Should throw FileNotFoundException");
} catch (FileNotFoundException e) {
// expected
}
}
@Test
public void testListStatusThrowsExceptionForNonExistentFile()
throws Exception {
try {
fSys.listStatus(getTestRootPath(fSys, "test/hadoop/file"));
Assert.fail("Should throw FileNotFoundException");
} catch (FileNotFoundException fnfe) {
// expected
}
}
// TODO: update after fixing HADOOP-7352
@Test
public void testListStatusThrowsExceptionForUnreadableDir()
throws Exception {
Path testRootDir = getTestRootPath(fSys, "test/hadoop/dir");
Path obscuredDir = new Path(testRootDir, "foo");
Path subDir = new Path(obscuredDir, "bar"); //so foo is non-empty
fSys.mkdirs(subDir);
fSys.setPermission(obscuredDir, new FsPermission((short)0)); //no access
try {
fSys.listStatus(obscuredDir);
Assert.fail("Should throw IOException");
} catch (IOException ioe) {
// expected
} finally {
// make sure the test directory can be deleted
fSys.setPermission(obscuredDir, new FsPermission((short)0755)); //default
}
}
@Test
public void testListStatus() throws Exception {
Path[] testDirs = {
getTestRootPath(fSys, "test/hadoop/a"),
getTestRootPath(fSys, "test/hadoop/b"),
getTestRootPath(fSys, "test/hadoop/c/1"), };
Assert.assertFalse(exists(fSys, testDirs[0]));
for (Path path : testDirs) {
fSys.mkdirs(path);
}
// test listStatus that returns an array
FileStatus[] paths = fSys.listStatus(getTestRootPath(fSys, "test"));
Assert.assertEquals(1, paths.length);
Assert.assertEquals(getTestRootPath(fSys, "test/hadoop"), paths[0].getPath());
paths = fSys.listStatus(getTestRootPath(fSys, "test/hadoop"));
Assert.assertEquals(3, paths.length);
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop/a"),
paths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop/b"),
paths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop/c"),
paths));
paths = fSys.listStatus(getTestRootPath(fSys, "test/hadoop/a"));
Assert.assertEquals(0, paths.length);
}
@Test
public void testListStatusFilterWithNoMatches() throws Exception {
Path[] testDirs = {
getTestRootPath(fSys, TEST_DIR_AAA2),
getTestRootPath(fSys, TEST_DIR_AAA),
getTestRootPath(fSys, TEST_DIR_AXA),
getTestRootPath(fSys, TEST_DIR_AXX), };
    if (!exists(fSys, testDirs[0])) {
for (Path path : testDirs) {
fSys.mkdirs(path);
}
}
// listStatus with filters returns empty correctly
FileStatus[] filteredPaths = fSys.listStatus(
getTestRootPath(fSys, "test"), TEST_X_FILTER);
Assert.assertEquals(0,filteredPaths.length);
}
@Test
public void testListStatusFilterWithSomeMatches() throws Exception {
Path[] testDirs = {
getTestRootPath(fSys, TEST_DIR_AAA),
getTestRootPath(fSys, TEST_DIR_AXA),
getTestRootPath(fSys, TEST_DIR_AXX),
getTestRootPath(fSys, TEST_DIR_AAA2), };
    if (!exists(fSys, testDirs[0])) {
for (Path path : testDirs) {
fSys.mkdirs(path);
}
}
// should return 2 paths ("/test/hadoop/axa" and "/test/hadoop/axx")
FileStatus[] filteredPaths = fSys.listStatus(
getTestRootPath(fSys, "test/hadoop"), TEST_X_FILTER);
Assert.assertEquals(2,filteredPaths.length);
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,
TEST_DIR_AXA), filteredPaths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,
TEST_DIR_AXX), filteredPaths));
}
@Test
public void testGlobStatusNonExistentFile() throws Exception {
FileStatus[] paths = fSys.globStatus(
getTestRootPath(fSys, "test/hadoopfsdf"));
Assert.assertNull(paths);
paths = fSys.globStatus(
getTestRootPath(fSys, "test/hadoopfsdf/?"));
Assert.assertEquals(0, paths.length);
paths = fSys.globStatus(
getTestRootPath(fSys, "test/hadoopfsdf/xyz*/?"));
Assert.assertEquals(0, paths.length);
}
@Test
public void testGlobStatusWithNoMatchesInPath() throws Exception {
Path[] testDirs = {
getTestRootPath(fSys, TEST_DIR_AAA),
getTestRootPath(fSys, TEST_DIR_AXA),
getTestRootPath(fSys, TEST_DIR_AXX),
getTestRootPath(fSys, TEST_DIR_AAA2), };
    if (!exists(fSys, testDirs[0])) {
for (Path path : testDirs) {
fSys.mkdirs(path);
}
}
// should return nothing
FileStatus[] paths = fSys.globStatus(
getTestRootPath(fSys, "test/hadoop/?"));
Assert.assertEquals(0, paths.length);
}
@Test
public void testGlobStatusSomeMatchesInDirectories() throws Exception {
Path[] testDirs = {
getTestRootPath(fSys, TEST_DIR_AAA),
getTestRootPath(fSys, TEST_DIR_AXA),
getTestRootPath(fSys, TEST_DIR_AXX),
getTestRootPath(fSys, TEST_DIR_AAA2), };
    if (!exists(fSys, testDirs[0])) {
for (Path path : testDirs) {
fSys.mkdirs(path);
}
}
// Should return two items ("/test/hadoop" and "/test/hadoop2")
FileStatus[] paths = fSys.globStatus(
getTestRootPath(fSys, "test/hadoop*"));
Assert.assertEquals(2, paths.length);
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,
"test/hadoop"), paths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,
"test/hadoop2"), paths));
}
@Test
public void testGlobStatusWithMultipleWildCardMatches() throws Exception {
Path[] testDirs = {
getTestRootPath(fSys, TEST_DIR_AAA),
getTestRootPath(fSys, TEST_DIR_AXA),
getTestRootPath(fSys, TEST_DIR_AXX),
getTestRootPath(fSys, TEST_DIR_AAA2), };
    if (!exists(fSys, testDirs[0])) {
for (Path path : testDirs) {
fSys.mkdirs(path);
}
}
//Should return all 4 items ("/test/hadoop/aaa", "/test/hadoop/axa"
//"/test/hadoop/axx", and "/test/hadoop2/axx")
FileStatus[] paths = fSys.globStatus(
getTestRootPath(fSys, "test/hadoop*/*"));
Assert.assertEquals(4, paths.length);
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AAA), paths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), paths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), paths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AAA2), paths));
}
@Test
public void testGlobStatusWithMultipleMatchesOfSingleChar() throws Exception {
Path[] testDirs = {
getTestRootPath(fSys, TEST_DIR_AAA),
getTestRootPath(fSys, TEST_DIR_AXA),
getTestRootPath(fSys, TEST_DIR_AXX),
getTestRootPath(fSys, TEST_DIR_AAA2), };
    if (!exists(fSys, testDirs[0])) {
for (Path path : testDirs) {
fSys.mkdirs(path);
}
}
//Should return only 2 items ("/test/hadoop/axa", "/test/hadoop/axx")
FileStatus[] paths = fSys.globStatus(
getTestRootPath(fSys, "test/hadoop/ax?"));
Assert.assertEquals(2, paths.length);
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,
TEST_DIR_AXA), paths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,
TEST_DIR_AXX), paths));
}
@Test
public void testGlobStatusFilterWithEmptyPathResults() throws Exception {
Path[] testDirs = {
getTestRootPath(fSys, TEST_DIR_AAA),
getTestRootPath(fSys, TEST_DIR_AXA),
getTestRootPath(fSys, TEST_DIR_AXX),
getTestRootPath(fSys, TEST_DIR_AXX), };
    if (!exists(fSys, testDirs[0])) {
for (Path path : testDirs) {
fSys.mkdirs(path);
}
}
//This should return an empty set
FileStatus[] filteredPaths = fSys.globStatus(
getTestRootPath(fSys, "test/hadoop/?"),
DEFAULT_FILTER);
Assert.assertEquals(0,filteredPaths.length);
}
@Test
public void testGlobStatusFilterWithSomePathMatchesAndTrivialFilter()
throws Exception {
Path[] testDirs = {
getTestRootPath(fSys, TEST_DIR_AAA),
getTestRootPath(fSys, TEST_DIR_AXA),
getTestRootPath(fSys, TEST_DIR_AXX),
getTestRootPath(fSys, TEST_DIR_AXX), };
    if (!exists(fSys, testDirs[0])) {
for (Path path : testDirs) {
fSys.mkdirs(path);
}
}
//This should return all three (aaa, axa, axx)
FileStatus[] filteredPaths = fSys.globStatus(
getTestRootPath(fSys, "test/hadoop/*"),
DEFAULT_FILTER);
Assert.assertEquals(3, filteredPaths.length);
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,
TEST_DIR_AAA), filteredPaths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,
TEST_DIR_AXA), filteredPaths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,
TEST_DIR_AXX), filteredPaths));
}
@Test
public void testGlobStatusFilterWithMultipleWildCardMatchesAndTrivialFilter()
throws Exception {
Path[] testDirs = {
getTestRootPath(fSys, TEST_DIR_AAA),
getTestRootPath(fSys, TEST_DIR_AXA),
getTestRootPath(fSys, TEST_DIR_AXX),
getTestRootPath(fSys, TEST_DIR_AXX), };
    if (!exists(fSys, testDirs[0])) {
for (Path path : testDirs) {
fSys.mkdirs(path);
}
}
//This should return all three (aaa, axa, axx)
FileStatus[] filteredPaths = fSys.globStatus(
getTestRootPath(fSys, "test/hadoop/a??"),
DEFAULT_FILTER);
Assert.assertEquals(3, filteredPaths.length);
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AAA),
filteredPaths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA),
filteredPaths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX),
filteredPaths));
}
@Test
public void testGlobStatusFilterWithMultiplePathMatchesAndNonTrivialFilter()
throws Exception {
Path[] testDirs = {
getTestRootPath(fSys, TEST_DIR_AAA),
getTestRootPath(fSys, TEST_DIR_AXA),
getTestRootPath(fSys, TEST_DIR_AXX),
getTestRootPath(fSys, TEST_DIR_AXX), };
    if (!exists(fSys, testDirs[0])) {
for (Path path : testDirs) {
fSys.mkdirs(path);
}
}
//This should return two (axa, axx)
FileStatus[] filteredPaths = fSys.globStatus(
getTestRootPath(fSys, "test/hadoop/*"),
TEST_X_FILTER);
Assert.assertEquals(2, filteredPaths.length);
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,
TEST_DIR_AXA), filteredPaths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,
TEST_DIR_AXX), filteredPaths));
}
@Test
public void testGlobStatusFilterWithNoMatchingPathsAndNonTrivialFilter()
throws Exception {
Path[] testDirs = {
getTestRootPath(fSys, TEST_DIR_AAA),
getTestRootPath(fSys, TEST_DIR_AXA),
getTestRootPath(fSys, TEST_DIR_AXX),
getTestRootPath(fSys, TEST_DIR_AXX), };
    if (!exists(fSys, testDirs[0])) {
for (Path path : testDirs) {
fSys.mkdirs(path);
}
}
//This should return an empty set
FileStatus[] filteredPaths = fSys.globStatus(
getTestRootPath(fSys, "test/hadoop/?"),
TEST_X_FILTER);
Assert.assertEquals(0,filteredPaths.length);
}
@Test
public void testGlobStatusFilterWithMultiplePathWildcardsAndNonTrivialFilter()
throws Exception {
Path[] testDirs = {
getTestRootPath(fSys, TEST_DIR_AAA),
getTestRootPath(fSys, TEST_DIR_AXA),
getTestRootPath(fSys, TEST_DIR_AXX),
getTestRootPath(fSys, TEST_DIR_AXX), };
    if (!exists(fSys, testDirs[0])) {
for (Path path : testDirs) {
fSys.mkdirs(path);
}
}
//This should return two (axa, axx)
FileStatus[] filteredPaths = fSys.globStatus(
getTestRootPath(fSys, "test/hadoop/a??"),
TEST_X_FILTER);
Assert.assertEquals(2, filteredPaths.length);
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA),
filteredPaths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX),
filteredPaths));
}
@Test
public void testWriteReadAndDeleteEmptyFile() throws Exception {
writeReadAndDelete(0);
}
@Test
public void testWriteReadAndDeleteHalfABlock() throws Exception {
writeReadAndDelete(getDefaultBlockSize() / 2);
}
@Test
public void testWriteReadAndDeleteOneBlock() throws Exception {
writeReadAndDelete(getDefaultBlockSize());
}
@Test
public void testWriteReadAndDeleteOneAndAHalfBlocks() throws Exception {
int blockSize = getDefaultBlockSize();
writeReadAndDelete(blockSize + (blockSize / 2));
}
@Test
public void testWriteReadAndDeleteTwoBlocks() throws Exception {
writeReadAndDelete(getDefaultBlockSize() * 2);
}
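  /**
   * Writes <code>len</code> bytes of the shared test payload to a new file,
   * verifies its length and contents on read-back, then deletes it.
   */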
protected void writeReadAndDelete(int len) throws IOException {
Path path = getTestRootPath(fSys, "test/hadoop/file");
fSys.mkdirs(path.getParent());
FSDataOutputStream out =
fSys.create(path, false, 4096, (short) 1, getDefaultBlockSize() );
out.write(data, 0, len);
out.close();
Assert.assertTrue("Exists", exists(fSys, path));
Assert.assertEquals("Length", len, fSys.getFileStatus(path).getLen());
FSDataInputStream in = fSys.open(path);
byte[] buf = new byte[len];
in.readFully(0, buf);
in.close();
Assert.assertEquals(len, buf.length);
for (int i = 0; i < buf.length; i++) {
Assert.assertEquals("Position " + i, data[i], buf[i]);
}
Assert.assertTrue("Deleted", fSys.delete(path, false));
Assert.assertFalse("No longer exists", exists(fSys, path));
}
@Test
public void testOverwrite() throws IOException {
Path path = getTestRootPath(fSys, "test/hadoop/file");
fSys.mkdirs(path.getParent());
createFile(path);
Assert.assertTrue("Exists", exists(fSys, path));
Assert.assertEquals("Length", data.length, fSys.getFileStatus(path).getLen());
try {
createFile(path);
Assert.fail("Should throw IOException.");
} catch (IOException e) {
// Expected
}
FSDataOutputStream out = fSys.create(path, true, 4096);
out.write(data, 0, data.length);
out.close();
Assert.assertTrue("Exists", exists(fSys, path));
Assert.assertEquals("Length", data.length, fSys.getFileStatus(path).getLen());
}
@Test
public void testWriteInNonExistentDirectory() throws IOException {
Path path = getTestRootPath(fSys, "test/hadoop/file");
Assert.assertFalse("Parent doesn't exist", exists(fSys, path.getParent()));
createFile(path);
Assert.assertTrue("Exists", exists(fSys, path));
Assert.assertEquals("Length", data.length, fSys.getFileStatus(path).getLen());
Assert.assertTrue("Parent exists", exists(fSys, path.getParent()));
}
@Test
public void testDeleteNonExistentFile() throws IOException {
Path path = getTestRootPath(fSys, "test/hadoop/file");
Assert.assertFalse("Doesn't exist", exists(fSys, path));
Assert.assertFalse("No deletion", fSys.delete(path, true));
}
@Test
public void testDeleteRecursively() throws IOException {
Path dir = getTestRootPath(fSys, "test/hadoop");
Path file = getTestRootPath(fSys, "test/hadoop/file");
Path subdir = getTestRootPath(fSys, "test/hadoop/subdir");
createFile(file);
fSys.mkdirs(subdir);
Assert.assertTrue("File exists", exists(fSys, file));
Assert.assertTrue("Dir exists", exists(fSys, dir));
Assert.assertTrue("Subdir exists", exists(fSys, subdir));
try {
fSys.delete(dir, false);
Assert.fail("Should throw IOException.");
} catch (IOException e) {
// expected
}
Assert.assertTrue("File still exists", exists(fSys, file));
Assert.assertTrue("Dir still exists", exists(fSys, dir));
Assert.assertTrue("Subdir still exists", exists(fSys, subdir));
Assert.assertTrue("Deleted", fSys.delete(dir, true));
Assert.assertFalse("File doesn't exist", exists(fSys, file));
Assert.assertFalse("Dir doesn't exist", exists(fSys, dir));
Assert.assertFalse("Subdir doesn't exist", exists(fSys, subdir));
}
@Test
public void testDeleteEmptyDirectory() throws IOException {
Path dir = getTestRootPath(fSys, "test/hadoop");
fSys.mkdirs(dir);
Assert.assertTrue("Dir exists", exists(fSys, dir));
Assert.assertTrue("Deleted", fSys.delete(dir, false));
Assert.assertFalse("Dir doesn't exist", exists(fSys, dir));
}
@Test
public void testRenameNonExistentPath() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fSys, "test/hadoop/nonExistent");
Path dst = getTestRootPath(fSys, "test/new/newpath");
try {
rename(src, dst, false, false, false, Rename.NONE);
Assert.fail("Should throw FileNotFoundException");
} catch (IOException e) {
Log.info("XXX", e);
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
try {
rename(src, dst, false, false, false, Rename.OVERWRITE);
Assert.fail("Should throw FileNotFoundException");
} catch (IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
}
@Test
public void testRenameFileToNonExistentDirectory() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fSys, "test/hadoop/file");
createFile(src);
Path dst = getTestRootPath(fSys, "test/nonExistent/newfile");
try {
rename(src, dst, false, true, false, Rename.NONE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
try {
rename(src, dst, false, true, false, Rename.OVERWRITE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
}
@Test
public void testRenameFileToDestinationWithParentFile() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fSys, "test/hadoop/file");
createFile(src);
Path dst = getTestRootPath(fSys, "test/parentFile/newfile");
createFile(dst.getParent());
try {
rename(src, dst, false, true, false, Rename.NONE);
Assert.fail("Expected exception was not thrown");
    } catch (IOException e) {
      // Expected
    }
try {
rename(src, dst, false, true, false, Rename.OVERWRITE);
Assert.fail("Expected exception was not thrown");
    } catch (IOException e) {
      // Expected
    }
}
@Test
public void testRenameFileToExistingParent() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fSys, "test/hadoop/file");
createFile(src);
Path dst = getTestRootPath(fSys, "test/new/newfile");
fSys.mkdirs(dst.getParent());
rename(src, dst, true, false, true, Rename.OVERWRITE);
}
@Test
public void testRenameFileToItself() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fSys, "test/hadoop/file");
createFile(src);
try {
rename(src, src, false, true, false, Rename.NONE);
Assert.fail("Renamed file to itself");
} catch (IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Also fails with overwrite
try {
rename(src, src, false, true, false, Rename.OVERWRITE);
Assert.fail("Renamed file to itself");
} catch (IOException e) {
      // Expected
}
}
@Test
public void testRenameFileAsExistingFile() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fSys, "test/hadoop/file");
createFile(src);
Path dst = getTestRootPath(fSys, "test/new/existingFile");
createFile(dst);
// Fails without overwrite option
try {
rename(src, dst, false, true, false, Rename.NONE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Succeeds with overwrite option
rename(src, dst, true, false, true, Rename.OVERWRITE);
}
@Test
public void testRenameFileAsExistingDirectory() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fSys, "test/hadoop/file");
createFile(src);
Path dst = getTestRootPath(fSys, "test/new/existingDir");
fSys.mkdirs(dst);
// Fails without overwrite option
try {
rename(src, dst, false, false, true, Rename.NONE);
Assert.fail("Expected exception was not thrown");
    } catch (IOException e) {
      // Expected
    }
    // A file cannot be renamed as a directory
try {
rename(src, dst, false, false, true, Rename.OVERWRITE);
Assert.fail("Expected exception was not thrown");
    } catch (IOException e) {
      // Expected
    }
}
@Test
public void testRenameDirectoryToItself() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fSys, "test/hadoop/dir");
fSys.mkdirs(src);
try {
rename(src, src, false, true, false, Rename.NONE);
Assert.fail("Renamed directory to itself");
} catch (IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Also fails with overwrite
try {
rename(src, src, false, true, false, Rename.OVERWRITE);
Assert.fail("Renamed directory to itself");
} catch (IOException e) {
      // Expected
}
}
@Test
public void testRenameDirectoryToNonExistentParent() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fSys, "test/hadoop/dir");
fSys.mkdirs(src);
Path dst = getTestRootPath(fSys, "test/nonExistent/newdir");
try {
rename(src, dst, false, true, false, Rename.NONE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
IOException ioException = unwrapException(e);
if (!(ioException instanceof FileNotFoundException)) {
throw ioException;
}
}
try {
rename(src, dst, false, true, false, Rename.OVERWRITE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
IOException ioException = unwrapException(e);
if (!(ioException instanceof FileNotFoundException)) {
throw ioException;
}
}
}
@Test
public void testRenameDirectoryAsNonExistentDirectory() throws Exception {
doTestRenameDirectoryAsNonExistentDirectory(Rename.NONE);
tearDown();
doTestRenameDirectoryAsNonExistentDirectory(Rename.OVERWRITE);
}
private void doTestRenameDirectoryAsNonExistentDirectory(Rename... options)
throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fSys, "test/hadoop/dir");
fSys.mkdirs(src);
createFile(getTestRootPath(fSys, "test/hadoop/dir/file1"));
createFile(getTestRootPath(fSys, "test/hadoop/dir/subdir/file2"));
Path dst = getTestRootPath(fSys, "test/new/newdir");
fSys.mkdirs(dst.getParent());
rename(src, dst, true, false, true, options);
Assert.assertFalse("Nested file1 exists",
exists(fSys, getTestRootPath(fSys, "test/hadoop/dir/file1")));
Assert.assertFalse("Nested file2 exists",
exists(fSys, getTestRootPath(fSys, "test/hadoop/dir/subdir/file2")));
Assert.assertTrue("Renamed nested file1 exists",
exists(fSys, getTestRootPath(fSys, "test/new/newdir/file1")));
Assert.assertTrue("Renamed nested exists",
exists(fSys, getTestRootPath(fSys, "test/new/newdir/subdir/file2")));
}
@Test
public void testRenameDirectoryAsEmptyDirectory() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fSys, "test/hadoop/dir");
fSys.mkdirs(src);
createFile(getTestRootPath(fSys, "test/hadoop/dir/file1"));
createFile(getTestRootPath(fSys, "test/hadoop/dir/subdir/file2"));
Path dst = getTestRootPath(fSys, "test/new/newdir");
fSys.mkdirs(dst);
// Fails without overwrite option
try {
rename(src, dst, false, true, false, Rename.NONE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
// Expected (cannot over-write non-empty destination)
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Succeeds with the overwrite option
rename(src, dst, true, false, true, Rename.OVERWRITE);
}
@Test
public void testRenameDirectoryAsNonEmptyDirectory() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fSys, "test/hadoop/dir");
fSys.mkdirs(src);
createFile(getTestRootPath(fSys, "test/hadoop/dir/file1"));
createFile(getTestRootPath(fSys, "test/hadoop/dir/subdir/file2"));
Path dst = getTestRootPath(fSys, "test/new/newdir");
fSys.mkdirs(dst);
createFile(getTestRootPath(fSys, "test/new/newdir/file1"));
// Fails without overwrite option
try {
rename(src, dst, false, true, false, Rename.NONE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
// Expected (cannot over-write non-empty destination)
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Fails even with the overwrite option
try {
rename(src, dst, false, true, false, Rename.OVERWRITE);
Assert.fail("Expected exception was not thrown");
} catch (IOException ex) {
// Expected (cannot over-write non-empty destination)
}
}
@Test
public void testRenameDirectoryAsFile() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fSys, "test/hadoop/dir");
fSys.mkdirs(src);
Path dst = getTestRootPath(fSys, "test/new/newfile");
createFile(dst);
// Fails without overwrite option
try {
rename(src, dst, false, true, true, Rename.NONE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
}
// Directory cannot be renamed as existing file
try {
rename(src, dst, false, true, true, Rename.OVERWRITE);
Assert.fail("Expected exception was not thrown");
} catch (IOException ex) {
}
}
@Test
public void testInputStreamClosedTwice() throws IOException {
//HADOOP-4760: according to Closeable#close(), closing an
//already-closed stream should have no effect.
Path src = getTestRootPath(fSys, "test/hadoop/file");
createFile(src);
FSDataInputStream in = fSys.open(src);
in.close();
in.close();
}
@Test
public void testOutputStreamClosedTwice() throws IOException {
//HADOOP-4760: according to Closeable#close(), closing an
//already-closed stream should have no effect.
Path src = getTestRootPath(fSys, "test/hadoop/file");
FSDataOutputStream out = fSys.create(src);
out.writeChar('H'); //write some data
out.close();
out.close();
}
@Test
public void testGetWrappedInputStream() throws IOException {
Path src = getTestRootPath(fSys, "test/hadoop/file");
createFile(src);
FSDataInputStream in = fSys.open(src);
InputStream is = in.getWrappedStream();
in.close();
Assert.assertNotNull(is);
}
@Test
public void testCopyToLocalWithUseRawLocalFileSystemOption() throws Exception {
Configuration conf = new Configuration();
FileSystem fSys = new RawLocalFileSystem();
Path fileToFS = new Path(getTestRootDir(), "fs.txt");
Path fileToLFS = new Path(getTestRootDir(), "test.txt");
Path crcFileAtLFS = new Path(getTestRootDir(), ".test.txt.crc");
fSys.initialize(new URI("file:///"), conf);
writeFile(fSys, fileToFS);
if (fSys.exists(crcFileAtLFS))
Assert.assertTrue("CRC files not deleted", fSys
.delete(crcFileAtLFS, true));
fSys.copyToLocalFile(false, fileToFS, fileToLFS, true);
Assert.assertFalse("CRC files are created", fSys.exists(crcFileAtLFS));
}
private void writeFile(FileSystem fs, Path name) throws IOException {
FSDataOutputStream stm = fs.create(name);
try {
stm.writeBytes("42\n");
} finally {
stm.close();
}
}
protected void createFile(Path path) throws IOException {
createFile(fSys, path);
}
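/*
* Helper contract (a descriptive note, not original javadoc): rename()
* below delegates to FileSystem#rename(Path, Path, Rename...), which
* signals failure by throwing IOException. When renameShouldSucceed is
* false, callers invoke this helper inside a try block and expect the
* exception to propagate to their catch clause; the existence assertions
* therefore only run on the successful path.
*/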
@SuppressWarnings("deprecation")
private void rename(Path src, Path dst, boolean renameShouldSucceed,
boolean srcExists, boolean dstExists, Rename... options)
throws IOException {
fSys.rename(src, dst, options);
if (!renameShouldSucceed)
Assert.fail("rename should have thrown exception");
Assert.assertEquals("Source exists", srcExists, exists(fSys, src));
Assert.assertEquals("Destination exists", dstExists, exists(fSys, dst));
}
private boolean containsTestRootPath(Path path, FileStatus[] filteredPaths)
throws IOException {
Path testRootPath = getTestRootPath(fSys, path.toString());
for(int i = 0; i < filteredPaths.length; i ++) {
if (testRootPath.equals(
filteredPaths[i].getPath()))
return true;
}
return false;
}
}
| 37,942 | 31.908066 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCanonicalization.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.NetUtilsTestResolver;
import org.apache.hadoop.util.Progressable;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestFileSystemCanonicalization {
static String[] authorities = {
"myfs://host",
"myfs://host.a",
"myfs://host.a.b",
};
static String[] ips = {
"myfs://127.0.0.1"
};
@BeforeClass
public static void initialize() throws Exception {
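// Install the test resolver so the short and partial host names used
// below (host, host.a) resolve deterministically; the exact resolution
// rules live in NetUtilsTestResolver.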
NetUtilsTestResolver.install();
}
// no ports
@Test
public void testShortAuthority() throws Exception {
FileSystem fs = getVerifiedFS("myfs://host", "myfs://host.a.b:123");
verifyPaths(fs, authorities, -1, true);
verifyPaths(fs, authorities, 123, true);
verifyPaths(fs, authorities, 456, false);
verifyPaths(fs, ips, -1, false);
verifyPaths(fs, ips, 123, false);
verifyPaths(fs, ips, 456, false);
}
@Test
public void testPartialAuthority() throws Exception {
FileSystem fs = getVerifiedFS("myfs://host.a", "myfs://host.a.b:123");
verifyPaths(fs, authorities, -1, true);
verifyPaths(fs, authorities, 123, true);
verifyPaths(fs, authorities, 456, false);
verifyPaths(fs, ips, -1, false);
verifyPaths(fs, ips, 123, false);
verifyPaths(fs, ips, 456, false);
}
@Test
public void testFullAuthority() throws Exception {
FileSystem fs = getVerifiedFS("myfs://host.a.b", "myfs://host.a.b:123");
verifyPaths(fs, authorities, -1, true);
verifyPaths(fs, authorities, 123, true);
verifyPaths(fs, authorities, 456, false);
verifyPaths(fs, ips, -1, false);
verifyPaths(fs, ips, 123, false);
verifyPaths(fs, ips, 456, false);
}
// with default ports
@Test
public void testShortAuthorityWithDefaultPort() throws Exception {
FileSystem fs = getVerifiedFS("myfs://host:123", "myfs://host.a.b:123");
verifyPaths(fs, authorities, -1, true);
verifyPaths(fs, authorities, 123, true);
verifyPaths(fs, authorities, 456, false);
verifyPaths(fs, ips, -1, false);
verifyPaths(fs, ips, 123, false);
verifyPaths(fs, ips, 456, false);
}
@Test
public void testPartialAuthorityWithDefaultPort() throws Exception {
FileSystem fs = getVerifiedFS("myfs://host.a:123", "myfs://host.a.b:123");
verifyPaths(fs, authorities, -1, true);
verifyPaths(fs, authorities, 123, true);
verifyPaths(fs, authorities, 456, false);
verifyPaths(fs, ips, -1, false);
verifyPaths(fs, ips, 123, false);
verifyPaths(fs, ips, 456, false);
}
@Test
public void testFullAuthorityWithDefaultPort() throws Exception {
FileSystem fs = getVerifiedFS("myfs://host.a.b:123", "myfs://host.a.b:123");
verifyPaths(fs, authorities, -1, true);
verifyPaths(fs, authorities, 123, true);
verifyPaths(fs, authorities, 456, false);
verifyPaths(fs, ips, -1, false);
verifyPaths(fs, ips, 123, false);
verifyPaths(fs, ips, 456, false);
}
// with non-standard ports
@Test
public void testShortAuthorityWithOtherPort() throws Exception {
FileSystem fs = getVerifiedFS("myfs://host:456", "myfs://host.a.b:456");
verifyPaths(fs, authorities, -1, false);
verifyPaths(fs, authorities, 123, false);
verifyPaths(fs, authorities, 456, true);
verifyPaths(fs, ips, -1, false);
verifyPaths(fs, ips, 123, false);
verifyPaths(fs, ips, 456, false);
}
@Test
public void testPartialAuthorityWithOtherPort() throws Exception {
FileSystem fs = getVerifiedFS("myfs://host.a:456", "myfs://host.a.b:456");
verifyPaths(fs, authorities, -1, false);
verifyPaths(fs, authorities, 123, false);
verifyPaths(fs, authorities, 456, true);
verifyPaths(fs, ips, -1, false);
verifyPaths(fs, ips, 123, false);
verifyPaths(fs, ips, 456, false);
}
@Test
public void testFullAuthorityWithOtherPort() throws Exception {
FileSystem fs = getVerifiedFS("myfs://host.a.b:456", "myfs://host.a.b:456");
verifyPaths(fs, authorities, -1, false);
verifyPaths(fs, authorities, 123, false);
verifyPaths(fs, authorities, 456, true);
verifyPaths(fs, ips, -1, false);
verifyPaths(fs, ips, 123, false);
verifyPaths(fs, ips, 456, false);
}
// ips
@Test
public void testIpAuthority() throws Exception {
FileSystem fs = getVerifiedFS("myfs://127.0.0.1", "myfs://127.0.0.1:123");
verifyPaths(fs, authorities, -1, false);
verifyPaths(fs, authorities, 123, false);
verifyPaths(fs, authorities, 456, false);
verifyPaths(fs, ips, -1, true);
verifyPaths(fs, ips, 123, true);
verifyPaths(fs, ips, 456, false);
}
@Test
public void testIpAuthorityWithDefaultPort() throws Exception {
FileSystem fs = getVerifiedFS("myfs://127.0.0.1:123", "myfs://127.0.0.1:123");
verifyPaths(fs, authorities, -1, false);
verifyPaths(fs, authorities, 123, false);
verifyPaths(fs, authorities, 456, false);
verifyPaths(fs, ips, -1, true);
verifyPaths(fs, ips, 123, true);
verifyPaths(fs, ips, 456, false);
}
@Test
public void testIpAuthorityWithOtherPort() throws Exception {
FileSystem fs = getVerifiedFS("myfs://127.0.0.1:456", "myfs://127.0.0.1:456");
verifyPaths(fs, authorities, -1, false);
verifyPaths(fs, authorities, 123, false);
verifyPaths(fs, authorities, 456, false);
verifyPaths(fs, ips, -1, false);
verifyPaths(fs, ips, 123, false);
verifyPaths(fs, ips, 456, true);
}
// bad stuff
@Test
public void testMismatchedSchemes() throws Exception {
FileSystem fs = getVerifiedFS("myfs2://simple", "myfs2://simple:123");
verifyPaths(fs, authorities, -1, false);
verifyPaths(fs, authorities, 123, false);
verifyPaths(fs, authorities, 456, false);
verifyPaths(fs, ips, -1, false);
verifyPaths(fs, ips, 123, false);
verifyPaths(fs, ips, 456, false);
}
@Test
public void testMismatchedHosts() throws Exception {
FileSystem fs = getVerifiedFS("myfs://simple", "myfs://simple:123");
verifyPaths(fs, authorities, -1, false);
verifyPaths(fs, authorities, 123, false);
verifyPaths(fs, authorities, 456, false);
verifyPaths(fs, ips, -1, false);
verifyPaths(fs, ips, 123, false);
verifyPaths(fs, ips, 456, false);
}
@Test
public void testNullAuthority() throws Exception {
FileSystem fs = getVerifiedFS("myfs:///", "myfs:///");
verifyPaths(fs, new String[]{ "myfs://" }, -1, true);
verifyPaths(fs, authorities, -1, false);
verifyPaths(fs, authorities, 123, false);
verifyPaths(fs, authorities, 456, false);
verifyPaths(fs, ips, -1, false);
verifyPaths(fs, ips, 123, false);
verifyPaths(fs, ips, 456, false);
}
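// When a path carries no authority ("myfs://"), path checking falls back
// to comparing against the configured default file system, as exercised
// by the next test.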
@Test
public void testAuthorityFromDefaultFS() throws Exception {
Configuration config = new Configuration();
String defaultFsKey = CommonConfigurationKeys.FS_DEFAULT_NAME_KEY;
FileSystem fs = getVerifiedFS("myfs://host", "myfs://host.a.b:123", config);
verifyPaths(fs, new String[]{ "myfs://" }, -1, false);
config.set(defaultFsKey, "myfs://host");
verifyPaths(fs, new String[]{ "myfs://" }, -1, true);
config.set(defaultFsKey, "myfs2://host");
verifyPaths(fs, new String[]{ "myfs://" }, -1, false);
config.set(defaultFsKey, "myfs://host:123");
verifyPaths(fs, new String[]{ "myfs://" }, -1, true);
config.set(defaultFsKey, "myfs://host:456");
verifyPaths(fs, new String[]{ "myfs://" }, -1, false);
}
FileSystem getVerifiedFS(String authority, String canonical) throws Exception {
return getVerifiedFS(authority, canonical, new Configuration());
}
// Create a fs from the authority, then check its uri against the given
// uri and the canonical one; then try to fetch paths using the canonical.
FileSystem getVerifiedFS(String authority, String canonical, Configuration conf)
throws Exception {
URI uri = URI.create(authority);
URI canonicalUri = URI.create(canonical);
FileSystem fs = new DummyFileSystem(uri, conf);
assertEquals(uri, fs.getUri());
assertEquals(canonicalUri, fs.getCanonicalUri());
verifyCheckPath(fs, "/file", true);
return fs;
}
void verifyPaths(FileSystem fs, String[] uris, int port, boolean shouldPass) {
for (String uri : uris) {
if (port != -1) uri += ":"+port;
verifyCheckPath(fs, uri+"/file", shouldPass);
}
}
void verifyCheckPath(FileSystem fs, String path, boolean shouldPass) {
Path rawPath = new Path(path);
Path fqPath = null;
Exception e = null;
try {
fqPath = fs.makeQualified(rawPath);
} catch (IllegalArgumentException iae) {
e = iae;
}
if (shouldPass) {
assertEquals(null, e);
String pathAuthority = rawPath.toUri().getAuthority();
if (pathAuthority == null) {
pathAuthority = fs.getUri().getAuthority();
}
assertEquals(pathAuthority, fqPath.toUri().getAuthority());
} else {
assertNotNull("did not fail", e);
assertEquals("Wrong FS: "+rawPath+", expected: "+fs.getUri(),
e.getMessage());
}
}
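// Minimal FileSystem stub: only the URI and canonicalization overrides
// matter for these tests; every data-path operation throws to prove it
// is never reached while checking paths.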
static class DummyFileSystem extends FileSystem {
URI uri;
static int defaultPort = 123;
DummyFileSystem(URI uri, Configuration conf) throws IOException {
this.uri = uri;
setConf(conf);
}
@Override
public URI getUri() {
return uri;
}
@Override
protected int getDefaultPort() {
return defaultPort;
}
@Override
protected URI canonicalizeUri(URI uri) {
return NetUtils.getCanonicalUri(uri, getDefaultPort());
}
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
throw new IOException("not supposed to be here");
}
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
throw new IOException("not supposed to be here");
}
@Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
throw new IOException("not supposed to be here");
}
@Override
public boolean rename(Path src, Path dst) throws IOException {
throw new IOException("not supposed to be here");
}
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
throw new IOException("not supposed to be here");
}
@Override
public FileStatus[] listStatus(Path f) throws IOException {
throw new IOException("not supposed to be here");
}
@Override
public void setWorkingDirectory(Path new_dir) {
}
@Override
public Path getWorkingDirectory() {
return new Path("/");
}
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
throw new IOException("not supposed to be here");
}
@Override
public FileStatus getFileStatus(Path f) throws IOException {
throw new IOException("not supposed to be here");
}
}
}
| 12,157 | 31.508021 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
import org.junit.Assert;
import org.junit.Test;
import java.io.IOException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.Collection;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import static org.apache.hadoop.fs.Options.ChecksumOpt;
import static org.apache.hadoop.fs.Options.CreateOpts;
import static org.apache.hadoop.fs.Options.Rename;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@SuppressWarnings("deprecation")
public class TestHarFileSystem {
public static final Log LOG = LogFactory.getLog(TestHarFileSystem.class);
/**
* FileSystem methods that must not be overridden by
* {@link HarFileSystem}, either because a default implementation
* is already available or because the method is not relevant here.
*/
@SuppressWarnings("deprecation")
private interface MustNotImplement {
public BlockLocation[] getFileBlockLocations(Path p, long start, long len);
public long getLength(Path f);
public FSDataOutputStream append(Path f, int bufferSize);
public void rename(Path src, Path dst, Rename... options);
public boolean exists(Path f);
public boolean isDirectory(Path f);
public boolean isFile(Path f);
public boolean createNewFile(Path f);
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException;
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException;
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
Progressable progress, ChecksumOpt checksumOpt);
public boolean mkdirs(Path f);
public FSDataInputStream open(Path f);
public FSDataOutputStream create(Path f);
public FSDataOutputStream create(Path f, boolean overwrite);
public FSDataOutputStream create(Path f, Progressable progress);
public FSDataOutputStream create(Path f, short replication);
public FSDataOutputStream create(Path f, short replication,
Progressable progress);
public FSDataOutputStream create(Path f, boolean overwrite,
int bufferSize);
public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
Progressable progress);
public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
short replication, long blockSize);
public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
short replication, long blockSize, Progressable progress);
public FSDataOutputStream create(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication,
long blockSize, Progressable progress) throws IOException;
public FSDataOutputStream create(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication,
long blockSize, Progressable progress, ChecksumOpt checksumOpt)
throws IOException;
public String getName();
public boolean delete(Path f);
public short getReplication(Path src);
public void processDeleteOnExit();
public ContentSummary getContentSummary(Path f);
public FsStatus getStatus();
public FileStatus[] listStatus(Path f, PathFilter filter);
public FileStatus[] listStatus(Path[] files);
public FileStatus[] listStatus(Path[] files, PathFilter filter);
public FileStatus[] globStatus(Path pathPattern);
public FileStatus[] globStatus(Path pathPattern, PathFilter filter);
public Iterator<LocatedFileStatus> listFiles(Path path,
boolean isRecursive);
public Iterator<LocatedFileStatus> listLocatedStatus(Path f);
public Iterator<LocatedFileStatus> listLocatedStatus(Path f,
PathFilter filter);
public Iterator<FileStatus> listStatusIterator(Path f);
public void copyFromLocalFile(Path src, Path dst);
public void moveFromLocalFile(Path[] srcs, Path dst);
public void moveFromLocalFile(Path src, Path dst);
public void copyToLocalFile(Path src, Path dst);
public void copyToLocalFile(boolean delSrc, Path src, Path dst,
boolean useRawLocalFileSystem);
public void moveToLocalFile(Path src, Path dst);
public long getBlockSize(Path f);
public FSDataOutputStream primitiveCreate(Path f,
EnumSet<CreateFlag> createFlag, CreateOpts... opts);
public void primitiveMkdir(Path f, FsPermission absolutePermission,
boolean createParent);
public int getDefaultPort();
public String getCanonicalServiceName();
public Token<?> getDelegationToken(String renewer) throws IOException;
public FileChecksum getFileChecksum(Path f) throws IOException;
public boolean deleteOnExit(Path f) throws IOException;
public boolean cancelDeleteOnExit(Path f) throws IOException;
public Token<?>[] addDelegationTokens(String renewer, Credentials creds)
throws IOException;
public Path fixRelativePart(Path p);
public void concat(Path trg, Path [] psrcs) throws IOException;
public FSDataOutputStream primitiveCreate(Path f,
FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
short replication, long blockSize, Progressable progress,
ChecksumOpt checksumOpt) throws IOException;
public boolean primitiveMkdir(Path f, FsPermission absolutePermission)
throws IOException;
public RemoteIterator<Path> listCorruptFileBlocks(Path path)
throws IOException;
public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
throws IOException;
public void createSymlink(Path target, Path link, boolean createParent)
throws IOException;
public FileStatus getFileLinkStatus(Path f) throws IOException;
public boolean supportsSymlinks();
public Path getLinkTarget(Path f) throws IOException;
public Path resolveLink(Path f) throws IOException;
public void setVerifyChecksum(boolean verifyChecksum);
public void setWriteChecksum(boolean writeChecksum);
public Path createSnapshot(Path path, String snapshotName) throws
IOException;
public void renameSnapshot(Path path, String snapshotOldName,
String snapshotNewName) throws IOException;
public void deleteSnapshot(Path path, String snapshotName)
throws IOException;
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException;
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException;
public void removeDefaultAcl(Path path) throws IOException;
public void removeAcl(Path path) throws IOException;
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException;
public void setXAttr(Path path, String name, byte[] value)
throws IOException;
public void setXAttr(Path path, String name, byte[] value,
EnumSet<XAttrSetFlag> flag) throws IOException;
public byte[] getXAttr(Path path, String name) throws IOException;
public Map<String, byte[]> getXAttrs(Path path) throws IOException;
public Map<String, byte[]> getXAttrs(Path path, List<String> names)
throws IOException;
public List<String> listXAttrs(Path path) throws IOException;
public void removeXAttr(Path path, String name) throws IOException;
public AclStatus getAclStatus(Path path) throws IOException;
public void access(Path path, FsAction mode) throws IOException;
public void setStoragePolicy(Path src, String policyName)
throws IOException;
public BlockStoragePolicySpi getStoragePolicy(final Path src)
throws IOException;
public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
throws IOException;
}
@Test
public void testHarUri() {
final Configuration conf = new Configuration();
checkInvalidPath("har://hdfs-/foo.har", conf);
checkInvalidPath("har://hdfs/foo.har", conf);
checkInvalidPath("har://-hdfs/foo.har", conf);
checkInvalidPath("har://-/foo.har", conf);
checkInvalidPath("har://127.0.0.1-/foo.har", conf);
checkInvalidPath("har://127.0.0.1/foo.har", conf);
}
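// For reference (an inference from the invalid cases above, not asserted
// directly): a well-formed har URI nests the underlying scheme and host in
// the authority, e.g. har://file-localhost/foo.har/file1 as used in
// testFileChecksum below.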
static void checkInvalidPath(String s, Configuration conf) {
System.out.println("\ncheckInvalidPath: " + s);
final Path p = new Path(s);
try {
p.getFileSystem(conf);
Assert.fail(p + " is an invalid path.");
} catch (IOException e) {
// Expected
}
}
@Test
public void testFileChecksum() throws Exception {
final Path p = new Path("har://file-localhost/foo.har/file1");
final HarFileSystem harfs = new HarFileSystem();
try {
Assert.assertEquals(null, harfs.getFileChecksum(p));
} finally {
// harfs is final and always assigned, so no null check is needed
harfs.close();
}
}
/**
* Test how block location offsets and lengths are fixed.
*/
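// A reading aid for the cases below (an inference from the assertions,
// not documentation of HarFileSystem): fixBlockLocations(b, start, len,
// fileOffsetInHar) rebases block offsets from the har part file onto the
// logical file and clips each block to the requested [start, start+len)
// range.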
@Test
public void testFixBlockLocations() {
// do some tests where start == 0
{
// case 1: range starts before current har block and ends after
BlockLocation[] b = { new BlockLocation(null, null, 10, 10) };
HarFileSystem.fixBlockLocations(b, 0, 20, 5);
assertEquals(b[0].getOffset(), 5);
assertEquals(b[0].getLength(), 10);
}
{
// case 2: range starts in current har block and ends after
BlockLocation[] b = { new BlockLocation(null, null, 10, 10) };
HarFileSystem.fixBlockLocations(b, 0, 20, 15);
assertEquals(b[0].getOffset(), 0);
assertEquals(b[0].getLength(), 5);
}
{
// case 3: range starts before current har block and ends in
// current har block
BlockLocation[] b = { new BlockLocation(null, null, 10, 10) };
HarFileSystem.fixBlockLocations(b, 0, 10, 5);
assertEquals(b[0].getOffset(), 5);
assertEquals(b[0].getLength(), 5);
}
{
// case 4: range starts and ends in current har block
BlockLocation[] b = { new BlockLocation(null, null, 10, 10) };
HarFileSystem.fixBlockLocations(b, 0, 6, 12);
assertEquals(b[0].getOffset(), 0);
assertEquals(b[0].getLength(), 6);
}
// now try a range where start == 3
{
// case 5: range starts before current har block and ends after
BlockLocation[] b = { new BlockLocation(null, null, 10, 10) };
HarFileSystem.fixBlockLocations(b, 3, 20, 5);
assertEquals(b[0].getOffset(), 5);
assertEquals(b[0].getLength(), 10);
}
{
// case 6: range starts in current har block and ends after
BlockLocation[] b = { new BlockLocation(null, null, 10, 10) };
HarFileSystem.fixBlockLocations(b, 3, 20, 15);
assertEquals(b[0].getOffset(), 3);
assertEquals(b[0].getLength(), 2);
}
{
// case 7: range starts before current har block and ends in
// current har block
BlockLocation[] b = { new BlockLocation(null, null, 10, 10) };
HarFileSystem.fixBlockLocations(b, 3, 7, 5);
assertEquals(b[0].getOffset(), 5);
assertEquals(b[0].getLength(), 5);
}
{
// case 8: range starts and ends in current har block
BlockLocation[] b = { new BlockLocation(null, null, 10, 10) };
HarFileSystem.fixBlockLocations(b, 3, 3, 12);
assertEquals(b[0].getOffset(), 3);
assertEquals(b[0].getLength(), 3);
}
// test case from JIRA MAPREDUCE-1752
{
BlockLocation[] b = { new BlockLocation(null, null, 512, 512),
new BlockLocation(null, null, 1024, 512) };
HarFileSystem.fixBlockLocations(b, 0, 512, 896);
assertEquals(b[0].getOffset(), 0);
assertEquals(b[0].getLength(), 128);
assertEquals(b[1].getOffset(), 128);
assertEquals(b[1].getLength(), 384);
}
}
@Test
public void testInheritedMethodsImplemented() throws Exception {
int errors = 0;
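// Contract checked below: every non-static, non-private, non-final method
// declared by FileSystem must either be listed in MustNotImplement (and
// then NOT declared by HarFileSystem) or be declared by HarFileSystem.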
for (Method m : FileSystem.class.getDeclaredMethods()) {
if (Modifier.isStatic(m.getModifiers()) ||
Modifier.isPrivate(m.getModifiers()) ||
Modifier.isFinal(m.getModifiers())) {
continue;
}
try {
MustNotImplement.class.getMethod(m.getName(), m.getParameterTypes());
try {
HarFileSystem.class.getDeclaredMethod(m.getName(), m.getParameterTypes());
LOG.error("HarFileSystem MUST not implement " + m);
errors++;
} catch (NoSuchMethodException ex) {
// Expected
}
} catch (NoSuchMethodException exc) {
try {
HarFileSystem.class.getDeclaredMethod(m.getName(), m.getParameterTypes());
} catch (NoSuchMethodException exc2) {
LOG.error("HarFileSystem MUST implement " + m);
errors++;
}
}
}
assertTrue((errors + " methods were not overridden correctly - see log"),
errors <= 0);
}
}
| 14,449 | 38.480874 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
import static org.junit.Assert.fail;
public class TestFileContext {
private static final Log LOG = LogFactory.getLog(TestFileContext.class);
@Test
public void testDefaultURIWithoutScheme() throws Exception {
final Configuration conf = new Configuration();
conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "/");
try {
FileContext.getFileContext(conf);
fail(UnsupportedFileSystemException.class + " not thrown!");
} catch (UnsupportedFileSystemException ufse) {
LOG.info("Expected exception: ", ufse);
}
}
}
| 1,538 | 35.642857 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import java.util.NoSuchElementException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.junit.Assume.*;
/** This test verifies that LocalDirAllocator works correctly.
* Every test case uses different buffer dirs to
* force the AllocatorPerContext to be re-initialized.
* This test does not run on Cygwin because under Cygwin
* a directory can be created inside a read-only directory,
* which breaks the read-only assumptions of this test.
*/
@RunWith(Parameterized.class)
public class TestLocalDirAllocator {
final static private Configuration conf = new Configuration();
final static private String BUFFER_DIR_ROOT = "build/test/temp";
final static private String ABSOLUTE_DIR_ROOT;
final static private String QUALIFIED_DIR_ROOT;
final static private Path BUFFER_PATH_ROOT = new Path(BUFFER_DIR_ROOT);
final static private File BUFFER_ROOT = new File(BUFFER_DIR_ROOT);
final static private String CONTEXT = "mapred.local.dir";
final static private String FILENAME = "block";
final static private LocalDirAllocator dirAllocator =
new LocalDirAllocator(CONTEXT);
static LocalFileSystem localFs;
final static private boolean isWindows =
System.getProperty("os.name").startsWith("Windows");
final static int SMALL_FILE_SIZE = 100;
final static private String RELATIVE = "/RELATIVE";
final static private String ABSOLUTE = "/ABSOLUTE";
final static private String QUALIFIED = "/QUALIFIED";
final private String ROOT;
final private String PREFIX;
static {
try {
localFs = FileSystem.getLocal(conf);
rmBufferDirs();
} catch(IOException e) {
System.out.println(e.getMessage());
e.printStackTrace();
System.exit(-1);
}
// absolute path in test environment
// /home/testuser/src/hadoop-common-project/hadoop-common/build/test/temp
ABSOLUTE_DIR_ROOT = new Path(localFs.getWorkingDirectory(),
BUFFER_DIR_ROOT).toUri().getPath();
// file:/home/testuser/src/hadoop-common-project/hadoop-common/build/test/temp
QUALIFIED_DIR_ROOT = new Path(localFs.getWorkingDirectory(),
BUFFER_DIR_ROOT).toUri().toString();
}
public TestLocalDirAllocator(String root, String prefix) {
ROOT = root;
PREFIX = prefix;
}
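// The three parameter sets run every test against relative, absolute,
// and fully qualified (file: scheme) buffer-dir roots.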
@Parameters
public static Collection<Object[]> params() {
Object [][] data = new Object[][] {
{ BUFFER_DIR_ROOT, RELATIVE },
{ ABSOLUTE_DIR_ROOT, ABSOLUTE },
{ QUALIFIED_DIR_ROOT, QUALIFIED }
};
return Arrays.asList(data);
}
private static void rmBufferDirs() throws IOException {
assertTrue(!localFs.exists(BUFFER_PATH_ROOT) ||
localFs.delete(BUFFER_PATH_ROOT, true));
}
private static void validateTempDirCreation(String dir) throws IOException {
File result = createTempFile(SMALL_FILE_SIZE);
assertTrue("Checking for " + dir + " in " + result + " - FAILED!",
result.getPath().startsWith(new Path(dir, FILENAME).toUri().getPath()));
}
private static File createTempFile() throws IOException {
return createTempFile(-1);
}
private static File createTempFile(long size) throws IOException {
File result = dirAllocator.createTmpFileForWrite(FILENAME, size, conf);
result.delete();
return result;
}
private String buildBufferDir(String dir, int i) {
return dir + PREFIX + i;
}
/** Two buffer dirs. The first dir does not exist & is on a read-only disk;
* The second dir exists & is RW
* @throws Exception
*/
@Test (timeout = 30000)
public void test0() throws Exception {
if (isWindows) return;
String dir0 = buildBufferDir(ROOT, 0);
String dir1 = buildBufferDir(ROOT, 1);
try {
conf.set(CONTEXT, dir0 + "," + dir1);
assertTrue(localFs.mkdirs(new Path(dir1)));
BUFFER_ROOT.setReadOnly();
validateTempDirCreation(dir1);
validateTempDirCreation(dir1);
} finally {
Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
BUFFER_DIR_ROOT));
rmBufferDirs();
}
}
/** Two buffer dirs. The first dir exists & is on a read-only disk;
* The second dir exists & is RW
* @throws Exception
*/
@Test (timeout = 30000)
public void testROBufferDirAndRWBufferDir() throws Exception {
if (isWindows) return;
String dir1 = buildBufferDir(ROOT, 1);
String dir2 = buildBufferDir(ROOT, 2);
try {
conf.set(CONTEXT, dir1 + "," + dir2);
assertTrue(localFs.mkdirs(new Path(dir2)));
BUFFER_ROOT.setReadOnly();
validateTempDirCreation(dir2);
validateTempDirCreation(dir2);
} finally {
Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
BUFFER_DIR_ROOT));
rmBufferDirs();
}
}
/** Two buffer dirs. Both do not exist but on a RW disk.
* Check if tmp dirs are allocated in a round-robin
*/
@Test (timeout = 30000)
public void testDirsNotExist() throws Exception {
if (isWindows) return;
String dir2 = buildBufferDir(ROOT, 2);
String dir3 = buildBufferDir(ROOT, 3);
try {
conf.set(CONTEXT, dir2 + "," + dir3);
// create the first file, then work out where the round-robin sequence starts
createTempFile(SMALL_FILE_SIZE);
int firstDirIdx = (dirAllocator.getCurrentDirectoryIndex() == 0) ? 2 : 3;
int secondDirIdx = (firstDirIdx == 2) ? 3 : 2;
// check if tmp dirs are allocated in a round-robin manner
validateTempDirCreation(buildBufferDir(ROOT, firstDirIdx));
validateTempDirCreation(buildBufferDir(ROOT, secondDirIdx));
validateTempDirCreation(buildBufferDir(ROOT, firstDirIdx));
} finally {
rmBufferDirs();
}
}
/** Two buffer dirs. Both exists and on a R/W disk.
* Later disk1 becomes read-only.
* @throws Exception
*/
@Test (timeout = 30000)
public void testRWBufferDirBecomesRO() throws Exception {
if (isWindows) return;
String dir3 = buildBufferDir(ROOT, 3);
String dir4 = buildBufferDir(ROOT, 4);
try {
conf.set(CONTEXT, dir3 + "," + dir4);
assertTrue(localFs.mkdirs(new Path(dir3)));
assertTrue(localFs.mkdirs(new Path(dir4)));
// Create the first small file
createTempFile(SMALL_FILE_SIZE);
// Determine the round-robin sequence
int nextDirIdx = (dirAllocator.getCurrentDirectoryIndex() == 0) ? 3 : 4;
validateTempDirCreation(buildBufferDir(ROOT, nextDirIdx));
// change buffer directory 2 to be read only
new File(new Path(dir4).toUri().getPath()).setReadOnly();
validateTempDirCreation(dir3);
validateTempDirCreation(dir3);
} finally {
rmBufferDirs();
}
}
/**
* Two buffer dirs, on read-write disk.
*
* Try to create a whole bunch of files.
* Verify that they do indeed all get created where they should.
*
* Would ideally check statistical properties of distribution, but
* we don't have the nerve to risk false-positives here.
*
* @throws Exception
*/
static final int TRIALS = 100;
@Test (timeout = 30000)
public void testCreateManyFiles() throws Exception {
if (isWindows) return;
String dir5 = buildBufferDir(ROOT, 5);
String dir6 = buildBufferDir(ROOT, 6);
try {
conf.set(CONTEXT, dir5 + "," + dir6);
assertTrue(localFs.mkdirs(new Path(dir5)));
assertTrue(localFs.mkdirs(new Path(dir6)));
int inDir5=0, inDir6=0;
for(int i = 0; i < TRIALS; ++i) {
File result = createTempFile();
if(result.getPath().startsWith(
new Path(dir5, FILENAME).toUri().getPath())) {
inDir5++;
} else if(result.getPath().startsWith(
new Path(dir6, FILENAME).toUri().getPath())) {
inDir6++;
}
result.delete();
}
assertTrue(inDir5 + inDir6 == TRIALS);
} finally {
rmBufferDirs();
}
}
/** Two buffer dirs. The first dir does not exist & is on a read-only disk;
* The second dir exists & is RW
* getLocalPathForWrite with checkAccess set to false should create a parent
* directory. With checkAccess true, the directory should not be created.
* @throws Exception
*/
@Test (timeout = 30000)
public void testLocalPathForWriteDirCreation() throws IOException {
String dir0 = buildBufferDir(ROOT, 0);
String dir1 = buildBufferDir(ROOT, 1);
try {
conf.set(CONTEXT, dir0 + "," + dir1);
assertTrue(localFs.mkdirs(new Path(dir1)));
BUFFER_ROOT.setReadOnly();
Path p1 =
dirAllocator.getLocalPathForWrite("p1/x", SMALL_FILE_SIZE, conf);
assertTrue(localFs.getFileStatus(p1.getParent()).isDirectory());
Path p2 =
dirAllocator.getLocalPathForWrite("p2/x", SMALL_FILE_SIZE, conf,
false);
try {
localFs.getFileStatus(p2.getParent());
} catch (Exception e) {
assertEquals(e.getClass(), FileNotFoundException.class);
}
} finally {
Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
BUFFER_DIR_ROOT));
rmBufferDirs();
}
}
/** Test that no side-effect files are left over. After creating a
* temp file, remove both the temp file and its parent. Verify that
* no files or directories are left over as can happen when File objects
* are mistakenly created from fully qualified path strings.
* @throws IOException
*/
@Test (timeout = 30000)
public void testNoSideEffects() throws IOException {
assumeTrue(!isWindows);
String dir = buildBufferDir(ROOT, 0);
try {
conf.set(CONTEXT, dir);
File result = dirAllocator.createTmpFileForWrite(FILENAME, -1, conf);
assertTrue(result.delete());
assertTrue(result.getParentFile().delete());
assertFalse(new File(dir).exists());
} finally {
Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
BUFFER_DIR_ROOT));
rmBufferDirs();
}
}
/**
* Test getLocalPathToRead() returns correct filename and "file" schema.
*
* @throws IOException
*/
@Test (timeout = 30000)
public void testGetLocalPathToRead() throws IOException {
assumeTrue(!isWindows);
String dir = buildBufferDir(ROOT, 0);
try {
conf.set(CONTEXT, dir);
assertTrue(localFs.mkdirs(new Path(dir)));
File f1 = dirAllocator.createTmpFileForWrite(FILENAME, SMALL_FILE_SIZE,
conf);
Path p1 = dirAllocator.getLocalPathToRead(f1.getName(), conf);
assertEquals(f1.getName(), p1.getName());
assertEquals("file", p1.getFileSystem(conf).getUri().getScheme());
} finally {
Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
BUFFER_DIR_ROOT));
rmBufferDirs();
}
}
/**
* Test that {@link LocalDirAllocator#getAllLocalPathsToRead(String, Configuration)}
* returns correct filenames and "file" schema.
*
* @throws IOException
*/
@Test (timeout = 30000)
public void testGetAllLocalPathsToRead() throws IOException {
assumeTrue(!isWindows);
String dir0 = buildBufferDir(ROOT, 0);
String dir1 = buildBufferDir(ROOT, 1);
try {
conf.set(CONTEXT, dir0 + "," + dir1);
assertTrue(localFs.mkdirs(new Path(dir0)));
assertTrue(localFs.mkdirs(new Path(dir1)));
localFs.create(new Path(dir0 + Path.SEPARATOR + FILENAME));
localFs.create(new Path(dir1 + Path.SEPARATOR + FILENAME));
// check both the paths are returned as paths to read:
final Iterable<Path> pathIterable = dirAllocator.getAllLocalPathsToRead(FILENAME, conf);
int count = 0;
for (final Path p: pathIterable) {
count++;
assertEquals(FILENAME, p.getName());
assertEquals("file", p.getFileSystem(conf).getUri().getScheme());
}
assertEquals(2, count);
// test #next() while no element to iterate any more:
try {
Path p = pathIterable.iterator().next();
fail("NoSuchElementException must be thrown, but returned [" + p
+ "] instead.");
} catch (NoSuchElementException nsee) {
// okay
}
// test modification not allowed:
final Iterable<Path> pathIterable2 = dirAllocator.getAllLocalPathsToRead(FILENAME, conf);
final Iterator<Path> it = pathIterable2.iterator();
try {
it.remove();
fail("UnsupportedOperationException expected from remove()");
} catch (UnsupportedOperationException uoe) {
// okay
}
} finally {
Shell.execCommand(new String[] { "chmod", "u+w", BUFFER_DIR_ROOT });
rmBufferDirs();
}
}
@Test (timeout = 30000)
public void testRemoveContext() throws IOException {
String dir = buildBufferDir(ROOT, 0);
try {
String contextCfgItemName = "application_1340842292563_0004.app.cache.dirs";
conf.set(contextCfgItemName, dir);
LocalDirAllocator localDirAllocator = new LocalDirAllocator(
contextCfgItemName);
localDirAllocator.getLocalPathForWrite("p1/x", SMALL_FILE_SIZE, conf);
assertTrue(LocalDirAllocator.isContextValid(contextCfgItemName));
LocalDirAllocator.removeContext(contextCfgItemName);
assertFalse(LocalDirAllocator.isContextValid(contextCfgItemName));
} finally {
rmBufferDirs();
}
}
}
| 14,636 | 33.767221 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.io.FileNotFoundException;
import java.net.URI;
import java.util.Random;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.token.Token;
import org.junit.Assert;
import static org.junit.Assert.*;
import static org.mockito.Mockito.mock;
/**
* Helper class for unit tests.
*/
public class FileSystemTestHelper {
private static final int DEFAULT_BLOCK_SIZE = 1024;
private static final int DEFAULT_NUM_BLOCKS = 2;
private static final short DEFAULT_NUM_REPL = 1;
protected final String testRootDir;
private String absTestRootDir = null;
/**
* Create helper with a randomized test root under <wd>/target/test/data
*/
public FileSystemTestHelper() {
this(System.getProperty("test.build.data", "target/test/data") + "/" + RandomStringUtils.randomAlphanumeric(10));
}
/**
* Create helper with the specified test root dir
*/
public FileSystemTestHelper(String testRootDir) {
this.testRootDir = testRootDir;
}
public static void addFileSystemForTesting(URI uri, Configuration conf,
FileSystem fs) throws IOException {
FileSystem.addFileSystemForTesting(uri, conf, fs);
}
public static int getDefaultBlockSize() {
return DEFAULT_BLOCK_SIZE;
}
public static byte[] getFileData(int numOfBlocks, long blockSize) {
byte[] data = new byte[(int) (numOfBlocks * blockSize)];
for (int i = 0; i < data.length; i++) {
data[i] = (byte) (i % 10);
}
return data;
}
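// Usage sketch (illustrative only; fs and path are assumed to exist):
// byte[] data = getFileData(DEFAULT_NUM_BLOCKS, getDefaultBlockSize());
// long written = createFile(fs, path, data, getDefaultBlockSize(), DEFAULT_NUM_REPL);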
public String getTestRootDir() {
return testRootDir;
}
/*
* get testRootPath qualified for fSys
*/
public Path getTestRootPath(FileSystem fSys) {
return fSys.makeQualified(new Path(testRootDir));
}
/*
* get testRootPath + pathString qualified for fSys
*/
public Path getTestRootPath(FileSystem fSys, String pathString) {
return fSys.makeQualified(new Path(testRootDir, pathString));
}
// The getAbsolute* methods are needed because the root test dir can be
// invalidated by changes of working directory: the test root path is
// often relative to the working directory of the process running the
// unit tests.
String getAbsoluteTestRootDir(FileSystem fSys)
throws IOException {
// NOTE: the result cannot be cached because different file systems
// may have different working directories.
if (new Path(testRootDir).isAbsolute()) {
absTestRootDir = testRootDir;
} else {
absTestRootDir = fSys.getWorkingDirectory().toString() + "/"
+ testRootDir;
}
return absTestRootDir;
}
public Path getAbsoluteTestRootPath(FileSystem fSys) throws IOException {
return fSys.makeQualified(new Path(getAbsoluteTestRootDir(fSys)));
}
public Path getDefaultWorkingDirectory(FileSystem fSys)
throws IOException {
return getTestRootPath(fSys, "/user/" + System.getProperty("user.name"))
.makeQualified(fSys.getUri(),
fSys.getWorkingDirectory());
}
/*
* Create files with numBlocks blocks each with block size blockSize.
*/
public static long createFile(FileSystem fSys, Path path, int numBlocks,
int blockSize, short numRepl, boolean createParent) throws IOException {
return createFile(fSys, path, getFileData(numBlocks, blockSize),
blockSize, numRepl);
}
public static long createFile(FileSystem fSys, Path path, byte[] data,
int blockSize, short numRepl) throws IOException {
FSDataOutputStream out =
fSys.create(path, false, 4096, numRepl, blockSize);
try {
out.write(data, 0, data.length);
} finally {
out.close();
}
return data.length;
}
public static long createFile(FileSystem fSys, Path path, int numBlocks,
int blockSize, boolean createParent) throws IOException {
return createFile(fSys, path, numBlocks, blockSize,
fSys.getDefaultReplication(path), true);
}
public static long createFile(FileSystem fSys, Path path, int numBlocks,
int blockSize) throws IOException {
return createFile(fSys, path, numBlocks, blockSize, true);
}
public static long createFile(FileSystem fSys, Path path) throws IOException {
return createFile(fSys, path, DEFAULT_NUM_BLOCKS, DEFAULT_BLOCK_SIZE,
DEFAULT_NUM_REPL, true);
}
public long createFile(FileSystem fSys, String name) throws IOException {
Path path = getTestRootPath(fSys, name);
return createFile(fSys, path);
}
public static boolean exists(FileSystem fSys, Path p) throws IOException {
return fSys.exists(p);
}
public static boolean isFile(FileSystem fSys, Path p) throws IOException {
try {
return fSys.getFileStatus(p).isFile();
} catch (FileNotFoundException e) {
return false;
}
}
public static boolean isDir(FileSystem fSys, Path p) throws IOException {
try {
return fSys.getFileStatus(p).isDirectory();
} catch (FileNotFoundException e) {
return false;
}
}
static String writeFile(FileSystem fileSys, Path name, int fileSize)
throws IOException {
final long seed = 0xDEADBEEFL;
// Create and write a file of the requested size, filled with random data
FSDataOutputStream stm = fileSys.create(name);
byte[] buffer = new byte[fileSize];
Random rand = new Random(seed);
rand.nextBytes(buffer);
stm.write(buffer);
stm.close();
return new String(buffer);
}
static String readFile(FileSystem fs, Path name, int buflen)
throws IOException {
byte[] b = new byte[buflen];
int offset = 0;
FSDataInputStream in = fs.open(name);
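// Read until the buffer is full or EOF; the loop body is intentionally
// empty -- all the work happens in the loop header.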
for (int remaining, n;
(remaining = b.length - offset) > 0 && (n = in.read(b, offset, remaining)) != -1;
offset += n);
assertEquals(offset, Math.min(b.length, in.getPos()));
in.close();
String s = new String(b, 0, offset);
return s;
}
public FileStatus containsPath(FileSystem fSys, Path path,
FileStatus[] dirList)
throws IOException {
for(int i = 0; i < dirList.length; i ++) {
if (getTestRootPath(fSys, path.toString()).equals(
dirList[i].getPath()))
return dirList[i];
}
return null;
}
public static FileStatus containsPath(Path path,
FileStatus[] dirList)
throws IOException {
for(int i = 0; i < dirList.length; i ++) {
if (path.equals(dirList[i].getPath()))
return dirList[i];
}
return null;
}
public FileStatus containsPath(FileSystem fSys, String path, FileStatus[] dirList)
throws IOException {
return containsPath(fSys, new Path(path), dirList);
}
public static enum fileType {isDir, isFile, isSymlink};
public static void checkFileStatus(FileSystem aFs, String path,
fileType expectedType) throws IOException {
FileStatus s = aFs.getFileStatus(new Path(path));
Assert.assertNotNull(s);
if (expectedType == fileType.isDir) {
Assert.assertTrue(s.isDirectory());
} else if (expectedType == fileType.isFile) {
Assert.assertTrue(s.isFile());
} else if (expectedType == fileType.isSymlink) {
Assert.assertTrue(s.isSymlink());
}
Assert.assertEquals(aFs.makeQualified(new Path(path)), s.getPath());
}
/**
* Class to enable easier mocking of a FileSystem
* Use getRawFileSystem to retrieve the mock
*/
public static class MockFileSystem extends FilterFileSystem {
public MockFileSystem() {
// it's a bit awkward to mock ourselves, but it allows the visibility
// of methods to be increased
super(mock(MockFileSystem.class));
}
@Override
public MockFileSystem getRawFileSystem() {
return (MockFileSystem) super.getRawFileSystem();
}
// these basic methods need to directly propagate to the mock to be
// more transparent
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
fs.initialize(uri, conf);
}
@Override
public String getCanonicalServiceName() {
return fs.getCanonicalServiceName();
}
@Override
public FileSystem[] getChildFileSystems() {
return fs.getChildFileSystems();
}
@Override // publicly expose for mocking
public Token<?> getDelegationToken(String renewer) throws IOException {
return fs.getDelegationToken(renewer);
}
}
}
| 9,230 | 30.831034 | 119 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsOptions.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.*;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.util.DataChecksum;
import org.junit.Test;
public class TestFsOptions {
@Test
public void testProcessChecksumOpt() {
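// Precedence exercised below: explicit fields in the user opt win over
// the default, unspecified fields fall back to the default, and a
// bytes-per-checksum argument overrides both.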
ChecksumOpt defaultOpt = new ChecksumOpt(DataChecksum.Type.CRC32, 512);
ChecksumOpt finalOpt;
// Give a null
finalOpt = ChecksumOpt.processChecksumOpt(defaultOpt, null);
checkParams(defaultOpt, finalOpt);
// null with bpc
finalOpt = ChecksumOpt.processChecksumOpt(defaultOpt, null, 1024);
checkParams(DataChecksum.Type.CRC32, 1024, finalOpt);
ChecksumOpt myOpt = new ChecksumOpt();
// custom with unspecified parameters
finalOpt = ChecksumOpt.processChecksumOpt(defaultOpt, myOpt);
checkParams(defaultOpt, finalOpt);
myOpt = new ChecksumOpt(DataChecksum.Type.CRC32C, 2048);
// custom config
finalOpt = ChecksumOpt.processChecksumOpt(defaultOpt, myOpt);
checkParams(DataChecksum.Type.CRC32C, 2048, finalOpt);
// custom config + bpc
finalOpt = ChecksumOpt.processChecksumOpt(defaultOpt, myOpt, 4096);
checkParams(DataChecksum.Type.CRC32C, 4096, finalOpt);
}
private void checkParams(ChecksumOpt expected, ChecksumOpt obtained) {
assertEquals(expected.getChecksumType(), obtained.getChecksumType());
assertEquals(expected.getBytesPerChecksum(), obtained.getBytesPerChecksum());
}
private void checkParams(DataChecksum.Type type, int bpc, ChecksumOpt obtained) {
assertEquals(type, obtained.getChecksumType());
assertEquals(bpc, obtained.getBytesPerChecksum());
}
}
| 2,444 | 34.434783 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGetFileBlockLocations.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Random;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
/**
* Testing the correctness of FileSystem.getFileBlockLocations.
*/
public class TestGetFileBlockLocations extends TestCase {
private static String TEST_ROOT_DIR =
System.getProperty("test.build.data", "/tmp/testGetFileBlockLocations");
private static final int FileLength = 4 * 1024 * 1024; // 4MB
private Configuration conf;
private Path path;
private FileSystem fs;
private Random random;
/**
* @see TestCase#setUp()
*/
@Override
protected void setUp() throws IOException {
conf = new Configuration();
Path rootPath = new Path(TEST_ROOT_DIR);
path = new Path(rootPath, "TestGetFileBlockLocations");
fs = rootPath.getFileSystem(conf);
FSDataOutputStream fsdos = fs.create(path, true);
byte[] buffer = new byte[1024];
while (fsdos.getPos() < FileLength) {
fsdos.write(buffer);
}
fsdos.close();
random = new Random(System.nanoTime());
}
private void oneTest(int offBegin, int offEnd, FileStatus status)
throws IOException {
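// Normalize the range: callers may pass the endpoints in either order.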
if (offBegin > offEnd) {
int tmp = offBegin;
offBegin = offEnd;
offEnd = tmp;
}
BlockLocation[] locations =
fs.getFileBlockLocations(status, offBegin, offEnd - offBegin);
if (offBegin < status.getLen()) {
Arrays.sort(locations, new Comparator<BlockLocation>() {
@Override
public int compare(BlockLocation arg0, BlockLocation arg1) {
long cmprv = arg0.getOffset() - arg1.getOffset();
if (cmprv < 0) return -1;
if (cmprv > 0) return 1;
cmprv = arg0.getLength() - arg1.getLength();
if (cmprv < 0) return -1;
if (cmprv > 0) return 1;
return 0;
}
});
offBegin = (int) Math.min(offBegin, status.getLen() - 1);
offEnd = (int) Math.min(offEnd, status.getLen());
BlockLocation first = locations[0];
BlockLocation last = locations[locations.length - 1];
assertTrue(first.getOffset() <= offBegin);
assertTrue(offEnd <= last.getOffset() + last.getLength());
} else {
assertTrue(locations.length == 0);
}
}
/**
* @see TestCase#tearDown()
*/
@Override
protected void tearDown() throws IOException {
fs.delete(path, true);
fs.close();
}
public void testFailureNegativeParameters() throws IOException {
FileStatus status = fs.getFileStatus(path);
try {
BlockLocation[] locations = fs.getFileBlockLocations(status, -1, 100);
fail("Expecting exception being throw");
} catch (IllegalArgumentException e) {
}
try {
BlockLocation[] locations = fs.getFileBlockLocations(status, 100, -1);
fail("Expecting exception being throw");
} catch (IllegalArgumentException e) {
}
}
public void testGetFileBlockLocations1() throws IOException {
FileStatus status = fs.getFileStatus(path);
oneTest(0, (int) status.getLen(), status);
oneTest(0, (int) status.getLen() * 2, status);
oneTest((int) status.getLen() * 2, (int) status.getLen() * 4, status);
oneTest((int) status.getLen() / 2, (int) status.getLen() * 3, status);
oneTest((int) status.getLen(), (int) status.getLen() * 2, status);
for (int i = 0; i < 10; ++i) {
oneTest((int) status.getLen() * i / 10, (int) status.getLen() * (i + 1)
/ 10, status);
}
}
public void testGetFileBlockLocations2() throws IOException {
FileStatus status = fs.getFileStatus(path);
for (int i = 0; i < 1000; ++i) {
int offBegin = random.nextInt((int) (2 * status.getLen()));
int offEnd = random.nextInt((int) (2 * status.getLen()));
oneTest(offBegin, offEnd, status);
}
}
}
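// Minimal usage sketch (an illustration, not part of the test above): the
// path is hypothetical and the local filesystem is assumed. Per the test,
// getFileBlockLocations returns every block overlapping [start, start+len)
// and an empty array when start is at or past end-of-file.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationsExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    FileStatus status = fs.getFileStatus(new Path("/tmp/some-file"));
    for (BlockLocation loc :
        fs.getFileBlockLocations(status, 0, status.getLen())) {
      System.out.println(loc.getOffset() + "+" + loc.getLength()
          + " on " + String.join(",", loc.getHosts()));
    }
  }
}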
| 4,681 | 32.205674 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.net.URI;
import java.util.Iterator;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
public class TestFilterFs extends TestCase {
private static final Log LOG = FileSystem.LOG;
public static class DontCheck {
public void checkScheme(URI uri, String supportedScheme) { }
public Iterator<FileStatus> listStatusIterator(Path f) {
return null;
}
public Iterator<LocatedFileStatus> listLocatedStatus(final Path f) {
return null;
}
}
public void testFilterFileSystem() throws Exception {
for (Method m : AbstractFileSystem.class.getDeclaredMethods()) {
if (Modifier.isStatic(m.getModifiers()))
continue;
if (Modifier.isPrivate(m.getModifiers()))
continue;
if (Modifier.isFinal(m.getModifiers()))
continue;
try {
DontCheck.class.getMethod(m.getName(), m.getParameterTypes());
LOG.info("Skipping " + m);
} catch (NoSuchMethodException exc) {
LOG.info("Testing " + m);
        try {
          FilterFs.class.getDeclaredMethod(m.getName(), m.getParameterTypes());
        } catch (NoSuchMethodException exc2) {
          LOG.error("FilterFs doesn't implement " + m);
          throw exc2;
        }
}
}
}
}
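// Generic sketch of the reflection technique above (an illustration under
// the assumption of an arbitrary base/wrapper pair; the real test also
// whitelists methods via its DontCheck class): assert that a wrapper class
// re-declares every overridable method of its base, so new base methods
// cannot be silently inherited.
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;

public class OverrideChecker {
  public static void requireAllOverridden(Class<?> base, Class<?> wrapper)
      throws NoSuchMethodException {
    for (Method m : base.getDeclaredMethods()) {
      int mods = m.getModifiers();
      if (Modifier.isStatic(mods) || Modifier.isPrivate(mods)
          || Modifier.isFinal(mods)) {
        continue; // not overridable, nothing to check
      }
      // Throws NoSuchMethodException if the wrapper forgot this method.
      wrapper.getDeclaredMethod(m.getName(), m.getParameterTypes());
    }
  }
}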
| 2,203 | 30.942029 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestStat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.StringReader;
import org.apache.hadoop.conf.Configuration;
import org.junit.Assume;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestStat extends FileSystemTestHelper {
static{
FileSystem.enableSymlinks();
}
private static Stat stat;
@BeforeClass
public static void setup() throws Exception {
stat = new Stat(new Path("/dummypath"),
        4096L, false, FileSystem.get(new Configuration()));
}
private class StatOutput {
final String doesNotExist;
final String directory;
final String file;
final String[] symlinks;
final String stickydir;
StatOutput(String doesNotExist, String directory, String file,
String[] symlinks, String stickydir) {
this.doesNotExist = doesNotExist;
this.directory = directory;
this.file = file;
this.symlinks = symlinks;
this.stickydir = stickydir;
}
void test() throws Exception {
BufferedReader br;
FileStatus status;
try {
br = new BufferedReader(new StringReader(doesNotExist));
stat.parseExecResult(br);
} catch (FileNotFoundException e) {
// expected
}
br = new BufferedReader(new StringReader(directory));
stat.parseExecResult(br);
status = stat.getFileStatusForTesting();
assertTrue(status.isDirectory());
br = new BufferedReader(new StringReader(file));
stat.parseExecResult(br);
status = stat.getFileStatusForTesting();
assertTrue(status.isFile());
for (String symlink : symlinks) {
br = new BufferedReader(new StringReader(symlink));
stat.parseExecResult(br);
status = stat.getFileStatusForTesting();
assertTrue(status.isSymlink());
}
br = new BufferedReader(new StringReader(stickydir));
stat.parseExecResult(br);
status = stat.getFileStatusForTesting();
assertTrue(status.isDirectory());
assertTrue(status.getPermission().getStickyBit());
}
}
@Test(timeout=10000)
public void testStatLinux() throws Exception {
String[] symlinks = new String[] {
"6,symbolic link,1373584236,1373584236,777,andrew,andrew,`link' -> `target'",
"6,symbolic link,1373584236,1373584236,777,andrew,andrew,'link' -> 'target'"
};
StatOutput linux = new StatOutput(
"stat: cannot stat `watermelon': No such file or directory",
"4096,directory,1373584236,1373586485,755,andrew,root,`.'",
"0,regular empty file,1373584228,1373584228,644,andrew,andrew,`target'",
symlinks,
"4096,directory,1374622334,1375124212,1755,andrew,andrew,`stickydir'");
linux.test();
}
@Test(timeout=10000)
public void testStatFreeBSD() throws Exception {
String[] symlinks = new String[] {
"6,Symbolic Link,1373508941,1373508941,120755,awang,awang,`link' -> `target'"
};
StatOutput freebsd = new StatOutput(
"stat: symtest/link: stat: No such file or directory",
"512,Directory,1373583695,1373583669,40755,awang,awang,`link' -> `'",
"0,Regular File,1373508937,1373508937,100644,awang,awang,`link' -> `'",
symlinks,
"512,Directory,1375139537,1375139537,41755,awang,awang,`link' -> `'");
freebsd.test();
}
@Test(timeout=10000)
public void testStatFileNotFound() throws Exception {
Assume.assumeTrue(Stat.isAvailable());
try {
stat.getFileStatus();
fail("Expected FileNotFoundException");
} catch (FileNotFoundException e) {
// expected
}
}
@Test(timeout=10000)
public void testStatEnvironment() throws Exception {
assertEquals("C", stat.getEnvironment("LANG"));
}
@Test(timeout=10000)
public void testStat() throws Exception {
Assume.assumeTrue(Stat.isAvailable());
FileSystem fs = FileSystem.getLocal(new Configuration());
Path testDir = new Path(getTestRootPath(fs), "teststat");
fs.mkdirs(testDir);
Path sub1 = new Path(testDir, "sub1");
Path sub2 = new Path(testDir, "sub2");
fs.mkdirs(sub1);
fs.createSymlink(sub1, sub2, false);
    FileStatus stat1 = new Stat(sub1, 4096L, false, fs).getFileStatus();
FileStatus stat2 = new Stat(sub2, 0, false, fs).getFileStatus();
assertTrue(stat1.isDirectory());
assertFalse(stat2.isDirectory());
fs.delete(testDir, true);
}
}
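// Simplified parsing sketch (an assumption for illustration, not the real
// Stat parser): splits one stat-style CSV line like the fixtures above.
// Fields: size, type, two epoch timestamps, octal mode, owner, group, name;
// the name field may itself contain commas, so split at most eight ways.
public class StatLineSketch {
  public static void main(String[] args) {
    String line = "4096,directory,1373584236,1373586485,755,andrew,root,`.'";
    String[] f = line.split(",", 8);
    long size = Long.parseLong(f[0]);
    boolean isDir = "directory".equals(f[1]);
    int mode = Integer.parseInt(f[4], 8);   // e.g. 755 -> rwxr-xr-x
    System.out.println(size + " dir=" + isDir
        + " mode=" + Integer.toOctalString(mode));
  }
}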
| 5,414 | 32.63354 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.EnumSet;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.Progressable;
import org.junit.Test;
public class TestAfsCheckPath {
  private static final int DEFAULT_PORT = 1234;
  private static final int OTHER_PORT = 4321;
@Test
public void testCheckPathWithNoPorts() throws URISyntaxException {
URI uri = new URI("dummy://dummy-host");
AbstractFileSystem afs = new DummyFileSystem(uri);
afs.checkPath(new Path("dummy://dummy-host"));
}
@Test
public void testCheckPathWithDefaultPort() throws URISyntaxException {
URI uri = new URI("dummy://dummy-host:" + DEFAULT_PORT);
AbstractFileSystem afs = new DummyFileSystem(uri);
afs.checkPath(new Path("dummy://dummy-host:" + DEFAULT_PORT));
}
@Test
public void testCheckPathWithTheSameNonDefaultPort()
throws URISyntaxException {
URI uri = new URI("dummy://dummy-host:" + OTHER_PORT);
AbstractFileSystem afs = new DummyFileSystem(uri);
afs.checkPath(new Path("dummy://dummy-host:" + OTHER_PORT));
}
@Test(expected=InvalidPathException.class)
public void testCheckPathWithDifferentPorts() throws URISyntaxException {
URI uri = new URI("dummy://dummy-host:" + DEFAULT_PORT);
AbstractFileSystem afs = new DummyFileSystem(uri);
afs.checkPath(new Path("dummy://dummy-host:" + OTHER_PORT));
}
private static class DummyFileSystem extends AbstractFileSystem {
public DummyFileSystem(URI uri) throws URISyntaxException {
super(uri, "dummy", true, DEFAULT_PORT);
}
@Override
public int getUriDefaultPort() {
return DEFAULT_PORT;
}
@Override
public FSDataOutputStream createInternal(Path f, EnumSet<CreateFlag> flag,
FsPermission absolutePermission, int bufferSize, short replication,
long blockSize, Progressable progress, ChecksumOpt checksumOpt,
boolean createParent) throws IOException {
// deliberately empty
return null;
}
@Override
public boolean delete(Path f, boolean recursive)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException {
// deliberately empty
return false;
}
@Override
public BlockLocation[] getFileBlockLocations(Path f, long start, long len)
throws IOException {
// deliberately empty
return null;
}
@Override
public FileChecksum getFileChecksum(Path f) throws IOException {
// deliberately empty
return null;
}
@Override
public FileStatus getFileStatus(Path f) throws IOException {
// deliberately empty
return null;
}
@Override
public FsStatus getFsStatus() throws IOException {
// deliberately empty
return null;
}
@Override
public FsServerDefaults getServerDefaults() throws IOException {
// deliberately empty
return null;
}
@Override
public FileStatus[] listStatus(Path f) throws IOException {
// deliberately empty
return null;
}
@Override
public void mkdir(Path dir, FsPermission permission, boolean createParent)
throws IOException {
// deliberately empty
}
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
// deliberately empty
return null;
}
@Override
public void renameInternal(Path src, Path dst) throws IOException {
// deliberately empty
}
@Override
public void setOwner(Path f, String username, String groupname)
throws IOException {
// deliberately empty
}
@Override
public void setPermission(Path f, FsPermission permission)
throws IOException {
// deliberately empty
}
@Override
public boolean setReplication(Path f, short replication) throws IOException {
// deliberately empty
return false;
}
@Override
public void setTimes(Path f, long mtime, long atime) throws IOException {
// deliberately empty
}
@Override
public void setVerifyChecksum(boolean verifyChecksum) throws IOException {
// deliberately empty
}
}
}
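// Standalone sketch of the port rule the tests above exercise (plain JDK,
// not the AbstractFileSystem API): a path URI may omit the port only when
// the filesystem runs on its default port; otherwise the ports must match,
// and checkPath raises InvalidPathException on a mismatch.
import java.net.URI;

public class PortRuleSketch {
  static boolean portsCompatible(URI fsUri, URI pathUri, int defaultPort) {
    int fsPort = fsUri.getPort() == -1 ? defaultPort : fsUri.getPort();
    int pathPort = pathUri.getPort() == -1 ? defaultPort : pathUri.getPort();
    return fsPort == pathPort;
  }

  public static void main(String[] args) {
    URI fs = URI.create("dummy://dummy-host:1234");
    System.out.println(
        portsCompatible(fs, URI.create("dummy://dummy-host"), 1234));      // true
    System.out.println(
        portsCompatible(fs, URI.create("dummy://dummy-host:4321"), 1234)); // false
  }
}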
| 5,257 | 28.539326 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Arrays;
import org.junit.After;
import static org.junit.Assert.*;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.apache.hadoop.fs.HardLink.*;
/**
 * This testing is fairly lightweight. It assumes HardLink routines will
 * only be called when permissions etc. are okay; no negative testing is
 * provided.
*
* These tests all use
* "src" as the source directory,
* "tgt_one" as the target directory for single-file hardlinking, and
* "tgt_mult" as the target directory for multi-file hardlinking.
*
* Contents of them are/will be:
* dir:src:
* files: x1, x2, x3
* dir:tgt_one:
* files: x1 (linked to src/x1), y (linked to src/x2),
* x3 (linked to src/x3), x11 (also linked to src/x1)
* dir:tgt_mult:
* files: x1, x2, x3 (all linked to same name in src/)
*
* NOTICE: This test class only tests the functionality of the OS
* upon which the test is run! (although you're pretty safe with the
* unix-like OS's, unless a typo sneaks in.)
*/
public class TestHardLink {
public static final String TEST_ROOT_DIR =
System.getProperty("test.build.data", "build/test/data") + "/test";
final static private File TEST_DIR = new File(TEST_ROOT_DIR, "hl");
private static String DIR = "dir_";
//define source and target directories
private static File src = new File(TEST_DIR, DIR + "src");
private static File tgt_mult = new File(TEST_DIR, DIR + "tgt_mult");
private static File tgt_one = new File(TEST_DIR, DIR + "tgt_one");
//define source files
private static File x1 = new File(src, "x1");
private static File x2 = new File(src, "x2");
private static File x3 = new File(src, "x3");
//define File objects for the target hardlinks
private static File x1_one = new File(tgt_one, "x1");
private static File y_one = new File(tgt_one, "y");
private static File x3_one = new File(tgt_one, "x3");
private static File x11_one = new File(tgt_one, "x11");
private static File x1_mult = new File(tgt_mult, "x1");
private static File x2_mult = new File(tgt_mult, "x2");
private static File x3_mult = new File(tgt_mult, "x3");
//content strings for file content testing
private static String str1 = "11111";
private static String str2 = "22222";
private static String str3 = "33333";
/**
* Assure clean environment for start of testing
* @throws IOException
*/
@BeforeClass
public static void setupClean() {
//delete source and target directories if they exist
FileUtil.fullyDelete(src);
FileUtil.fullyDelete(tgt_one);
FileUtil.fullyDelete(tgt_mult);
//check that they are gone
assertFalse(src.exists());
assertFalse(tgt_one.exists());
assertFalse(tgt_mult.exists());
}
/**
* Initialize clean environment for start of each test
*/
@Before
public void setupDirs() throws IOException {
//check that we start out with empty top-level test data directory
assertFalse(src.exists());
assertFalse(tgt_one.exists());
assertFalse(tgt_mult.exists());
//make the source and target directories
src.mkdirs();
tgt_one.mkdirs();
tgt_mult.mkdirs();
//create the source files in src, with unique contents per file
makeNonEmptyFile(x1, str1);
makeNonEmptyFile(x2, str2);
makeNonEmptyFile(x3, str3);
//validate
validateSetup();
}
/**
   * Validate that {@link #setupDirs()} produced the expected result
*/
private void validateSetup() throws IOException {
//check existence of source directory and files
assertTrue(src.exists());
assertEquals(3, src.list().length);
assertTrue(x1.exists());
assertTrue(x2.exists());
assertTrue(x3.exists());
//check contents of source files
assertTrue(fetchFileContents(x1).equals(str1));
assertTrue(fetchFileContents(x2).equals(str2));
assertTrue(fetchFileContents(x3).equals(str3));
//check target directories exist and are empty
assertTrue(tgt_one.exists());
assertTrue(tgt_mult.exists());
assertEquals(0, tgt_one.list().length);
assertEquals(0, tgt_mult.list().length);
}
/**
* validate that single-file link operations produced the expected results
*/
private void validateTgtOne() throws IOException {
//check that target directory tgt_one ended up with expected four files
assertTrue(tgt_one.exists());
assertEquals(4, tgt_one.list().length);
assertTrue(x1_one.exists());
assertTrue(x11_one.exists());
assertTrue(y_one.exists());
assertTrue(x3_one.exists());
//confirm the contents of those four files reflects the known contents
//of the files they were hardlinked from.
assertTrue(fetchFileContents(x1_one).equals(str1));
assertTrue(fetchFileContents(x11_one).equals(str1));
assertTrue(fetchFileContents(y_one).equals(str2));
assertTrue(fetchFileContents(x3_one).equals(str3));
}
/**
* validate that multi-file link operations produced the expected results
*/
private void validateTgtMult() throws IOException {
//check that target directory tgt_mult ended up with expected three files
assertTrue(tgt_mult.exists());
assertEquals(3, tgt_mult.list().length);
assertTrue(x1_mult.exists());
assertTrue(x2_mult.exists());
assertTrue(x3_mult.exists());
//confirm the contents of those three files reflects the known contents
//of the files they were hardlinked from.
assertTrue(fetchFileContents(x1_mult).equals(str1));
assertTrue(fetchFileContents(x2_mult).equals(str2));
assertTrue(fetchFileContents(x3_mult).equals(str3));
}
@After
public void tearDown() throws IOException {
setupClean();
}
private void makeNonEmptyFile(File file, String contents)
throws IOException {
FileWriter fw = new FileWriter(file);
fw.write(contents);
fw.close();
}
private void appendToFile(File file, String contents)
throws IOException {
FileWriter fw = new FileWriter(file, true);
fw.write(contents);
fw.close();
}
private String fetchFileContents(File file)
throws IOException {
char[] buf = new char[20];
FileReader fr = new FileReader(file);
int cnt = fr.read(buf);
fr.close();
char[] result = Arrays.copyOf(buf, cnt);
return new String(result);
}
/**
* Sanity check the simplest case of HardLink.getLinkCount()
* to make sure we get back "1" for ordinary single-linked files.
* Tests with multiply-linked files are in later test cases.
*/
@Test
public void testGetLinkCount() throws IOException {
//at beginning of world, check that source files have link count "1"
//since they haven't been hardlinked yet
assertEquals(1, getLinkCount(x1));
assertEquals(1, getLinkCount(x2));
assertEquals(1, getLinkCount(x3));
}
/**
* Test the single-file method HardLink.createHardLink().
* Also tests getLinkCount() with values greater than one.
*/
@Test
public void testCreateHardLink() throws IOException {
//hardlink a single file and confirm expected result
createHardLink(x1, x1_one);
assertTrue(x1_one.exists());
assertEquals(2, getLinkCount(x1)); //x1 and x1_one are linked now
assertEquals(2, getLinkCount(x1_one)); //so they both have count "2"
//confirm that x2, which we didn't change, still shows count "1"
assertEquals(1, getLinkCount(x2));
//now do a few more
createHardLink(x2, y_one);
createHardLink(x3, x3_one);
assertEquals(2, getLinkCount(x2));
assertEquals(2, getLinkCount(x3));
//create another link to a file that already has count 2
createHardLink(x1, x11_one);
assertEquals(3, getLinkCount(x1)); //x1, x1_one, and x11_one
assertEquals(3, getLinkCount(x1_one)); //are all linked, so they
assertEquals(3, getLinkCount(x11_one)); //should all have count "3"
//validate by contents
validateTgtOne();
//validate that change of content is reflected in the other linked files
appendToFile(x1_one, str3);
assertTrue(fetchFileContents(x1_one).equals(str1 + str3));
assertTrue(fetchFileContents(x11_one).equals(str1 + str3));
assertTrue(fetchFileContents(x1).equals(str1 + str3));
}
/*
* Test the multi-file method HardLink.createHardLinkMult(),
* multiple files within a directory into one target directory
*/
@Test
public void testCreateHardLinkMult() throws IOException {
//hardlink a whole list of three files at once
String[] fileNames = src.list();
createHardLinkMult(src, fileNames, tgt_mult);
//validate by link count - each file has been linked once,
//so each count is "2"
assertEquals(2, getLinkCount(x1));
assertEquals(2, getLinkCount(x2));
assertEquals(2, getLinkCount(x3));
assertEquals(2, getLinkCount(x1_mult));
assertEquals(2, getLinkCount(x2_mult));
assertEquals(2, getLinkCount(x3_mult));
//validate by contents
validateTgtMult();
//validate that change of content is reflected in the other linked files
appendToFile(x1_mult, str3);
assertTrue(fetchFileContents(x1_mult).equals(str1 + str3));
assertTrue(fetchFileContents(x1).equals(str1 + str3));
}
/**
* Test createHardLinkMult() with empty list of files.
* We use an extended version of the method call, that
* returns the number of System exec calls made, which should
* be zero in this case.
*/
@Test
public void testCreateHardLinkMultEmptyList() throws IOException {
String[] emptyList = {};
//test the case of empty file list
createHardLinkMult(src, emptyList, tgt_mult);
//check nothing changed in the directory tree
validateSetup();
}
/*
* Assume that this test won't usually be run on a Windows box.
* This test case allows testing of the correct syntax of the Windows
* commands, even though they don't actually get executed on a non-Win box.
* The basic idea is to have enough here that substantive changes will
* fail and the author will fix and add to this test as appropriate.
*
* Depends on the HardLinkCGWin class and member fields being accessible
* from this test method.
*/
@Test
public void testWindowsSyntax() {
class win extends HardLinkCGWin {}
//basic checks on array lengths
assertEquals(4, win.getLinkCountCommand.length);
//make sure "%f" was not munged
assertEquals(2, ("%f").length());
//make sure "\\%f" was munged correctly
assertEquals(3, ("\\%f").length());
assertTrue(win.getLinkCountCommand[1].equals("hardlink"));
//make sure "-c%h" was not munged
assertEquals(4, ("-c%h").length());
}
}
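// Portable JDK-only sketch of the hard-link semantics the tests above rely
// on (an illustration; the class under test shells out instead, and the
// /tmp paths and the "unix:nlink" attribute are POSIX-only assumptions):
// linked names share one inode, so the link count climbs with each link and
// a write through either name is visible through the other.
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class HardLinkSketch {
  public static void main(String[] args) throws Exception {
    Path src = Paths.get("/tmp/hl-src");
    Path link = Paths.get("/tmp/hl-link");
    Files.deleteIfExists(src);
    Files.deleteIfExists(link);
    Files.write(src, "11111".getBytes());
    Files.createLink(link, src);          // both names now share one inode
    System.out.println(Files.getAttribute(src, "unix:nlink"));   // 2
    Files.write(link, "33333".getBytes(), StandardOpenOption.APPEND);
    System.out.println(new String(Files.readAllBytes(src)));     // 1111133333
  }
}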
| 11,615 | 34.2 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFsFCStatistics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.net.URI;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
/**
* <p>
* Tests the File Context Statistics for {@link LocalFileSystem}
* </p>
*/
public class TestLocalFsFCStatistics extends FCStatisticsBaseTest {
static final String LOCAL_FS_ROOT_URI = "file:///tmp/test";
@Before
public void setUp() throws Exception {
fc = FileContext.getLocalFSFileContext();
fc.mkdir(fileContextTestHelper.getTestRootPath(fc, "test"), FileContext.DEFAULT_PERM, true);
}
@After
public void tearDown() throws Exception {
fc.delete(fileContextTestHelper.getTestRootPath(fc, "test"), true);
}
@Override
protected void verifyReadBytes(Statistics stats) {
// one blockSize for read, one for pread
Assert.assertEquals(2*blockSize, stats.getBytesRead());
}
@Override
protected void verifyWrittenBytes(Statistics stats) {
//Extra 12 bytes are written apart from the block.
Assert.assertEquals(blockSize + 12, stats.getBytesWritten());
}
@Override
protected URI getFsUri() {
return URI.create(LOCAL_FS_ROOT_URI);
}
}
| 2,003 | 29.363636 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextResolveAfs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
* Tests resolution of AbstractFileSystems for a given path with symlinks.
*/
public class TestFileContextResolveAfs {
static{
FileSystem.enableSymlinks();
}
private static String TEST_ROOT_DIR_LOCAL
= System.getProperty("test.build.data","/tmp");
private FileContext fc;
private FileSystem localFs;
@Before
public void setup() throws IOException {
fc = FileContext.getFileContext();
}
@Test (timeout = 30000)
public void testFileContextResolveAfs() throws IOException {
Configuration conf = new Configuration();
localFs = FileSystem.get(conf);
Path localPath = new Path(TEST_ROOT_DIR_LOCAL + "/TestFileContextResolveAfs1");
Path linkPath = localFs.makeQualified(new Path(TEST_ROOT_DIR_LOCAL,
"TestFileContextResolveAfs2"));
localFs.mkdirs(new Path(TEST_ROOT_DIR_LOCAL));
localFs.create(localPath);
fc.createSymlink(localPath, linkPath, true);
Set<AbstractFileSystem> afsList = fc.resolveAbstractFileSystems(linkPath);
Assert.assertEquals(1, afsList.size());
localFs.deleteOnExit(localPath);
localFs.deleteOnExit(linkPath);
localFs.close();
}
}
| 2,167 | 31.358209 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import static org.apache.hadoop.fs.FileContextTestHelper.*;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.test.GenericTestUtils;
/**
* <p>
 * A collection of tests for the {@link FileContext} create and mkdir
 * methods. This test should be used for testing an instance of FileContext
 * that has been initialized to a specific default FileSystem such as
 * LocalFileSystem, HDFS, S3, etc.
* </p>
* <p>
* To test a given {@link FileSystem} implementation create a subclass of this
* test and override {@link #setUp()} to initialize the <code>fc</code>
* {@link FileContext} instance variable.
*
 * Since this is a JUnit 4 test you can also do a single setup before
 * the start of any tests.
 * E.g.
 *     @BeforeClass   public static void clusterSetupAtBeginning()
 *     @AfterClass    public static void clusterShutdownAtEnd()
* </p>
*/
public abstract class FileContextCreateMkdirBaseTest {
protected final FileContextTestHelper fileContextTestHelper;
protected static FileContext fc;
static {
GenericTestUtils.setLogLevel(FileSystem.LOG, Level.DEBUG);
}
public FileContextCreateMkdirBaseTest() {
fileContextTestHelper = createFileContextHelper();
}
protected FileContextTestHelper createFileContextHelper() {
return new FileContextTestHelper();
}
@Before
public void setUp() throws Exception {
fc.mkdir(getTestRootPath(fc), FileContext.DEFAULT_PERM, true);
}
@After
public void tearDown() throws Exception {
fc.delete(getTestRootPath(fc), true);
}
///////////////////////
// Test Mkdir
////////////////////////
@Test
public void testMkdirNonRecursiveWithExistingDir() throws IOException {
Path f = getTestRootPath(fc, "aDir");
fc.mkdir(f, FileContext.DEFAULT_PERM, false);
Assert.assertTrue(isDir(fc, f));
}
@Test
public void testMkdirNonRecursiveWithNonExistingDir() {
    try {
      fc.mkdir(getTestRootPath(fc, "NonExistant/aDir"),
          FileContext.DEFAULT_PERM, false);
      Assert.fail("Mkdir with non-existent parent dir should have failed");
    } catch (IOException e) {
      // failed as expected
    }
}
@Test
public void testMkdirRecursiveWithExistingDir() throws IOException {
Path f = getTestRootPath(fc, "aDir");
fc.mkdir(f, FileContext.DEFAULT_PERM, true);
Assert.assertTrue(isDir(fc, f));
}
@Test
public void testMkdirRecursiveWithNonExistingDir() throws IOException {
Path f = getTestRootPath(fc, "NonExistant2/aDir");
fc.mkdir(f, FileContext.DEFAULT_PERM, true);
Assert.assertTrue(isDir(fc, f));
}
///////////////////////
// Test Create
////////////////////////
@Test
public void testCreateNonRecursiveWithExistingDir() throws IOException {
Path f = getTestRootPath(fc, "foo");
createFile(fc, f);
Assert.assertTrue(isFile(fc, f));
}
@Test
public void testCreateNonRecursiveWithNonExistingDir() {
try {
createFileNonRecursive(fc, getTestRootPath(fc, "NonExisting/foo"));
Assert.fail("Create with non existing parent dir should have failed");
} catch (IOException e) {
// As expected
}
}
@Test
public void testCreateRecursiveWithExistingDir() throws IOException {
Path f = getTestRootPath(fc,"foo");
createFile(fc, f);
Assert.assertTrue(isFile(fc, f));
}
@Test
public void testCreateRecursiveWithNonExistingDir() throws IOException {
Path f = getTestRootPath(fc,"NonExisting/foo");
createFile(fc, f);
Assert.assertTrue(isFile(fc, f));
}
private Path getTestRootPath(FileContext fc) {
return fileContextTestHelper.getTestRootPath(fc);
}
private Path getTestRootPath(FileContext fc, String pathString) {
return fileContextTestHelper.getTestRootPath(fc, pathString);
}
}
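// Minimal sketch of the subclassing pattern the Javadoc above describes
// (the concrete class name is illustrative): bind the shared fc to the
// local filesystem once, before any of the inherited tests run.
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextCreateMkdirBaseTest;
import org.junit.BeforeClass;

public class TestLocalFsCreateMkdir extends FileContextCreateMkdirBaseTest {
  @BeforeClass
  public static void clusterSetupAtBeginning() throws Exception {
    fc = FileContext.getLocalFSFileContext();
  }
}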
| 4,860 | 28.460606 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFcLocalFsPermission.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.junit.After;
import org.junit.Before;
/**
* Test permissions for localFs using FileContext API.
*/
public class TestFcLocalFsPermission extends
FileContextPermissionBase {
@Override
@Before
public void setUp() throws Exception {
super.setUp();
}
@Override
@After
public void tearDown() throws Exception {
super.tearDown();
}
@Override
protected FileContext getFileContext() throws UnsupportedFileSystemException {
return FileContext.getLocalFSFileContext();
}
}
| 1,355 | 27.851064 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.apache.hadoop.fs.FileContextTestHelper.createFile;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Test;
import com.google.common.base.Supplier;
import com.google.common.util.concurrent.Uninterruptibles;
/**
* <p>
* Base class to test {@link FileContext} Statistics.
* </p>
*/
public abstract class FCStatisticsBaseTest {
static protected int blockSize = 512;
static protected int numBlocks = 1;
protected final FileContextTestHelper fileContextTestHelper = new FileContextTestHelper();
//fc should be set appropriately by the deriving test.
protected static FileContext fc = null;
@Test(timeout=60000)
public void testStatisticsOperations() throws Exception {
final Statistics stats = new Statistics("file");
Assert.assertEquals(0L, stats.getBytesRead());
Assert.assertEquals(0L, stats.getBytesWritten());
Assert.assertEquals(0, stats.getWriteOps());
stats.incrementBytesWritten(1000);
Assert.assertEquals(1000L, stats.getBytesWritten());
Assert.assertEquals(0, stats.getWriteOps());
stats.incrementWriteOps(123);
Assert.assertEquals(123, stats.getWriteOps());
Thread thread = new Thread() {
@Override
public void run() {
stats.incrementWriteOps(1);
}
};
thread.start();
Uninterruptibles.joinUninterruptibly(thread);
Assert.assertEquals(124, stats.getWriteOps());
// Test copy constructor and reset function
Statistics stats2 = new Statistics(stats);
stats.reset();
Assert.assertEquals(0, stats.getWriteOps());
Assert.assertEquals(0L, stats.getBytesWritten());
Assert.assertEquals(0L, stats.getBytesRead());
Assert.assertEquals(124, stats2.getWriteOps());
Assert.assertEquals(1000L, stats2.getBytesWritten());
Assert.assertEquals(0L, stats2.getBytesRead());
}
@Test
public void testStatistics() throws IOException, URISyntaxException {
URI fsUri = getFsUri();
Statistics stats = FileContext.getStatistics(fsUri);
Assert.assertEquals(0, stats.getBytesRead());
    Path filePath = fileContextTestHelper.getTestRootPath(fc, "file1");
createFile(fc, filePath, numBlocks, blockSize);
Assert.assertEquals(0, stats.getBytesRead());
verifyWrittenBytes(stats);
FSDataInputStream fstr = fc.open(filePath);
byte[] buf = new byte[blockSize];
int bytesRead = fstr.read(buf, 0, blockSize);
fstr.read(0, buf, 0, blockSize);
Assert.assertEquals(blockSize, bytesRead);
verifyReadBytes(stats);
verifyWrittenBytes(stats);
verifyReadBytes(FileContext.getStatistics(getFsUri()));
Map<URI, Statistics> statsMap = FileContext.getAllStatistics();
URI exactUri = getSchemeAuthorityUri();
verifyWrittenBytes(statsMap.get(exactUri));
fc.delete(filePath, true);
}
@Test(timeout=60000)
public void testStatisticsThreadLocalDataCleanUp() throws Exception {
final Statistics stats = new Statistics("test");
// create a small thread pool to test the statistics
final int size = 2;
ExecutorService es = Executors.newFixedThreadPool(size);
List<Callable<Boolean>> tasks = new ArrayList<Callable<Boolean>>(size);
for (int i = 0; i < size; i++) {
tasks.add(new Callable<Boolean>() {
public Boolean call() {
// this populates the data set in statistics
stats.incrementReadOps(1);
return true;
}
});
}
// run the threads
es.invokeAll(tasks);
// assert that the data size is exactly the number of threads
final AtomicInteger allDataSize = new AtomicInteger(0);
allDataSize.set(stats.getAllThreadLocalDataSize());
Assert.assertEquals(size, allDataSize.get());
Assert.assertEquals(size, stats.getReadOps());
// force the GC to collect the threads by shutting down the thread pool
es.shutdownNow();
es.awaitTermination(1, TimeUnit.MINUTES);
es = null;
System.gc();
// wait for up to 10 seconds
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
int size = stats.getAllThreadLocalDataSize();
allDataSize.set(size);
return size == 0;
}
}, 1000, 10*1000);
Assert.assertEquals(0, allDataSize.get());
Assert.assertEquals(size, stats.getReadOps());
}
/**
* Bytes read may be different for different file systems. This method should
* throw assertion error if bytes read are incorrect.
*
* @param stats
*/
protected abstract void verifyReadBytes(Statistics stats);
/**
* Bytes written may be different for different file systems. This method should
* throw assertion error if bytes written are incorrect.
*
* @param stats
*/
protected abstract void verifyWrittenBytes(Statistics stats);
/**
* Returns the filesystem uri. Should be set
* @return URI
*/
protected abstract URI getFsUri();
protected URI getSchemeAuthorityUri() {
URI uri = getFsUri();
    String schemeAuthString = uri.getScheme() + "://";
    if (uri.getAuthority() == null) {
      schemeAuthString += "/";
    } else {
      schemeAuthString += uri.getAuthority();
    }
    return URI.create(schemeAuthString);
}
}
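// Hedged sketch of a concrete subclass (the class name, URI scheme, and
// byte counts are illustrative assumptions; the exact counts are
// filesystem-specific, as TestLocalFsFCStatistics above shows with its
// extra 12 written bytes).
import java.net.URI;
import org.apache.hadoop.fs.FCStatisticsBaseTest;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.junit.Assert;

public class ExampleFsStatisticsTest extends FCStatisticsBaseTest {
  @Override
  protected URI getFsUri() {
    return URI.create("example:///");
  }

  @Override
  protected void verifyReadBytes(Statistics stats) {
    // the base test reads one block sequentially and one block positionally
    Assert.assertEquals(2 * blockSize, stats.getBytesRead());
  }

  @Override
  protected void verifyWrittenBytes(Statistics stats) {
    Assert.assertEquals(blockSize, stats.getBytesWritten());
  }
}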
| 6,522 | 33.696809 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAvroFSInput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.BufferedWriter;
import java.io.OutputStreamWriter;
import org.apache.hadoop.conf.Configuration;
import junit.framework.TestCase;
public class TestAvroFSInput extends TestCase {
private static final String INPUT_DIR = "AvroFSInput";
private Path getInputPath() {
String dataDir = System.getProperty("test.build.data");
if (null == dataDir) {
return new Path(INPUT_DIR);
} else {
return new Path(new Path(dataDir), INPUT_DIR);
}
}
public void testAFSInput() throws Exception {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
Path dir = getInputPath();
if (!fs.exists(dir)) {
fs.mkdirs(dir);
}
Path filePath = new Path(dir, "foo");
if (fs.exists(filePath)) {
fs.delete(filePath, false);
}
FSDataOutputStream ostream = fs.create(filePath);
BufferedWriter w = new BufferedWriter(new OutputStreamWriter(ostream));
w.write("0123456789");
w.close();
// Create the stream
FileContext fc = FileContext.getFileContext(conf);
AvroFSInput avroFSIn = new AvroFSInput(fc, filePath);
assertEquals(10, avroFSIn.length());
// Check initial position
byte [] buf = new byte[1];
assertEquals(0, avroFSIn.tell());
// Check a read from that position.
avroFSIn.read(buf, 0, 1);
assertEquals(1, avroFSIn.tell());
assertEquals('0', (char)buf[0]);
// Check a seek + read
avroFSIn.seek(4);
assertEquals(4, avroFSIn.tell());
avroFSIn.read(buf, 0, 1);
assertEquals('4', (char)buf[0]);
assertEquals(5, avroFSIn.tell());
avroFSIn.close();
}
}
| 2,490 | 27.306818 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DelegationTokenRenewer.Renewable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Time;
import org.junit.Before;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
public class TestDelegationTokenRenewer {
public abstract class RenewableFileSystem extends FileSystem
implements Renewable { }
private static final long RENEW_CYCLE = 1000;
private DelegationTokenRenewer renewer;
Configuration conf;
FileSystem fs;
@Before
public void setup() {
DelegationTokenRenewer.renewCycle = RENEW_CYCLE;
DelegationTokenRenewer.reset();
renewer = DelegationTokenRenewer.getInstance();
}
@SuppressWarnings("unchecked")
@Test
public void testAddRemoveRenewAction() throws IOException,
InterruptedException {
Text service = new Text("myservice");
Configuration conf = mock(Configuration.class);
Token<?> token = mock(Token.class);
doReturn(service).when(token).getService();
doAnswer(new Answer<Long>() {
public Long answer(InvocationOnMock invocation) {
return Time.now() + RENEW_CYCLE;
}
}).when(token).renew(any(Configuration.class));
RenewableFileSystem fs = mock(RenewableFileSystem.class);
doReturn(conf).when(fs).getConf();
doReturn(token).when(fs).getRenewToken();
renewer.addRenewAction(fs);
assertEquals("FileSystem not added to DelegationTokenRenewer", 1,
renewer.getRenewQueueLength());
Thread.sleep(RENEW_CYCLE*2);
verify(token, atLeast(2)).renew(eq(conf));
verify(token, atMost(3)).renew(eq(conf));
verify(token, never()).cancel(any(Configuration.class));
renewer.removeRenewAction(fs);
verify(token).cancel(eq(conf));
verify(fs, never()).getDelegationToken(null);
verify(fs, never()).setDelegationToken(any(Token.class));
assertEquals("FileSystem not removed from DelegationTokenRenewer", 0,
renewer.getRenewQueueLength());
}
@Test
public void testAddRenewActionWithNoToken() throws IOException,
InterruptedException {
Configuration conf = mock(Configuration.class);
RenewableFileSystem fs = mock(RenewableFileSystem.class);
doReturn(conf).when(fs).getConf();
doReturn(null).when(fs).getRenewToken();
renewer.addRenewAction(fs);
verify(fs).getRenewToken();
assertEquals(0, renewer.getRenewQueueLength());
}
@Test
public void testGetNewTokenOnRenewFailure() throws IOException,
InterruptedException {
Text service = new Text("myservice");
Configuration conf = mock(Configuration.class);
final Token<?> token1 = mock(Token.class);
doReturn(service).when(token1).getService();
doThrow(new IOException("boom")).when(token1).renew(eq(conf));
final Token<?> token2 = mock(Token.class);
doReturn(service).when(token2).getService();
doAnswer(new Answer<Long>() {
public Long answer(InvocationOnMock invocation) {
return Time.now() + RENEW_CYCLE;
}
}).when(token2).renew(eq(conf));
RenewableFileSystem fs = mock(RenewableFileSystem.class);
doReturn(conf).when(fs).getConf();
doReturn(token1).doReturn(token2).when(fs).getRenewToken();
doReturn(token2).when(fs).getDelegationToken(null);
doAnswer(new Answer<Token<?>[]>() {
public Token<?>[] answer(InvocationOnMock invocation) {
return new Token<?>[]{token2};
}
}).when(fs).addDelegationTokens(null, null);
renewer.addRenewAction(fs);
assertEquals(1, renewer.getRenewQueueLength());
Thread.sleep(RENEW_CYCLE);
verify(fs).getRenewToken();
verify(token1, atLeast(1)).renew(eq(conf));
verify(token1, atMost(2)).renew(eq(conf));
verify(fs).addDelegationTokens(null, null);
verify(fs).setDelegationToken(eq(token2));
assertEquals(1, renewer.getRenewQueueLength());
renewer.removeRenewAction(fs);
verify(token2).cancel(eq(conf));
assertEquals(0, renewer.getRenewQueueLength());
}
@Test
public void testStopRenewalWhenFsGone() throws IOException,
InterruptedException {
Configuration conf = mock(Configuration.class);
Token<?> token = mock(Token.class);
doReturn(new Text("myservice")).when(token).getService();
doAnswer(new Answer<Long>() {
public Long answer(InvocationOnMock invocation) {
return Time.now() + RENEW_CYCLE;
}
}).when(token).renew(any(Configuration.class));
RenewableFileSystem fs = mock(RenewableFileSystem.class);
doReturn(conf).when(fs).getConf();
doReturn(token).when(fs).getRenewToken();
renewer.addRenewAction(fs);
assertEquals(1, renewer.getRenewQueueLength());
Thread.sleep(RENEW_CYCLE);
verify(token, atLeast(1)).renew(eq(conf));
verify(token, atMost(2)).renew(eq(conf));
// drop weak ref
fs = null;
System.gc(); System.gc(); System.gc();
// next renew should detect the fs as gone
Thread.sleep(RENEW_CYCLE);
verify(token, atLeast(1)).renew(eq(conf));
verify(token, atMost(2)).renew(eq(conf));
assertEquals(0, renewer.getRenewQueueLength());
}
@Test(timeout=4000)
public void testMultipleTokensDoNotDeadlock() throws IOException,
InterruptedException {
Configuration conf = mock(Configuration.class);
FileSystem fs = mock(FileSystem.class);
doReturn(conf).when(fs).getConf();
long distantFuture = Time.now() + 3600 * 1000; // 1h
Token<?> token1 = mock(Token.class);
doReturn(new Text("myservice1")).when(token1).getService();
doReturn(distantFuture).when(token1).renew(eq(conf));
Token<?> token2 = mock(Token.class);
doReturn(new Text("myservice2")).when(token2).getService();
doReturn(distantFuture).when(token2).renew(eq(conf));
RenewableFileSystem fs1 = mock(RenewableFileSystem.class);
doReturn(conf).when(fs1).getConf();
doReturn(token1).when(fs1).getRenewToken();
RenewableFileSystem fs2 = mock(RenewableFileSystem.class);
doReturn(conf).when(fs2).getConf();
doReturn(token2).when(fs2).getRenewToken();
renewer.addRenewAction(fs1);
renewer.addRenewAction(fs2);
assertEquals(2, renewer.getRenewQueueLength());
renewer.removeRenewAction(fs1);
assertEquals(1, renewer.getRenewQueueLength());
renewer.removeRenewAction(fs2);
assertEquals(0, renewer.getRenewQueueLength());
verify(token1).cancel(eq(conf));
verify(token2).cancel(eq(conf));
}
}
| 7,522 | 33.195455 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFSFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.util.Shell;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeTrue;
public class TestSymlinkLocalFSFileSystem extends TestSymlinkLocalFS {
@BeforeClass
public static void testSetup() throws Exception {
FileSystem filesystem = FileSystem.getLocal(new Configuration());
wrapper = new FileSystemTestWrapper(filesystem);
}
@Ignore("RawLocalFileSystem#mkdir does not treat existence of directory" +
" as an error")
@Override
@Test(timeout=1000)
public void testMkdirExistingLink() throws IOException {}
@Ignore("FileSystem#create defaults to creating parents," +
" throwing an IOException instead of FileNotFoundException")
@Override
@Test(timeout=1000)
public void testCreateFileViaDanglingLinkParent() throws IOException {}
@Ignore("RawLocalFileSystem does not throw an exception if the path" +
" already exists")
@Override
@Test(timeout=1000)
public void testCreateFileDirExistingLink() throws IOException {}
@Ignore("ChecksumFileSystem does not support append")
@Override
@Test(timeout=1000)
public void testAccessFileViaInterSymlinkAbsTarget() throws IOException {}
@Override
public void testRenameFileWithDestParentSymlink() throws IOException {
assumeTrue(!Shell.WINDOWS);
super.testRenameFileWithDestParentSymlink();
}
  /** Rename a symlink to itself. */
  @Override
  @Test(timeout=10000)
  public void testRenameSymlinkToItself() throws IOException {
Path file = new Path(testBaseDir1(), "file");
createAndWriteFile(file);
Path link = new Path(testBaseDir1(), "linkToFile1");
wrapper.createSymlink(file, link, false);
try {
wrapper.rename(link, link);
fail("Failed to get expected IOException");
} catch (IOException e) {
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Fails with overwrite as well
try {
wrapper.rename(link, link, Rename.OVERWRITE);
fail("Failed to get expected IOException");
} catch (IOException e) {
// Todo: Fix this test when HADOOP-9819 is fixed.
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException ||
unwrapException(e) instanceof FileNotFoundException);
}
}
}
| 3,393 | 33.989691 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.FileNotFoundException;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
/**
* <p>
* A collection of tests for the contract of the {@link FileSystem}.
 * This test should be used for general-purpose implementations of
 * {@link FileSystem}, that is, implementations that provide all of
 * the functionality of {@link FileSystem}.
* </p>
* <p>
* To test a given {@link FileSystem} implementation create a subclass of this
* test and override {@link #setUp()} to initialize the <code>fs</code>
* {@link FileSystem} instance variable.
* </p>
*/
public abstract class FileSystemContractBaseTest extends TestCase {
private static final Log LOG =
LogFactory.getLog(FileSystemContractBaseTest.class);
protected final static String TEST_UMASK = "062";
protected FileSystem fs;
protected byte[] data = dataset(getBlockSize() * 2, 0, 255);
@Override
protected void tearDown() throws Exception {
fs.delete(path("/test"), true);
}
protected int getBlockSize() {
return 1024;
}
protected String getDefaultWorkingDirectory() {
return "/user/" + System.getProperty("user.name");
}
protected boolean renameSupported() {
return true;
}
public void testFsStatus() throws Exception {
FsStatus fsStatus = fs.getStatus();
assertNotNull(fsStatus);
//used, free and capacity are non-negative longs
assertTrue(fsStatus.getUsed() >= 0);
assertTrue(fsStatus.getRemaining() >= 0);
assertTrue(fsStatus.getCapacity() >= 0);
}
public void testWorkingDirectory() throws Exception {
Path workDir = path(getDefaultWorkingDirectory());
assertEquals(workDir, fs.getWorkingDirectory());
fs.setWorkingDirectory(path("."));
assertEquals(workDir, fs.getWorkingDirectory());
fs.setWorkingDirectory(path(".."));
assertEquals(workDir.getParent(), fs.getWorkingDirectory());
Path relativeDir = path("hadoop");
fs.setWorkingDirectory(relativeDir);
assertEquals(relativeDir, fs.getWorkingDirectory());
Path absoluteDir = path("/test/hadoop");
fs.setWorkingDirectory(absoluteDir);
assertEquals(absoluteDir, fs.getWorkingDirectory());
}
public void testMkdirs() throws Exception {
Path testDir = path("/test/hadoop");
assertFalse(fs.exists(testDir));
assertFalse(fs.isFile(testDir));
assertTrue(fs.mkdirs(testDir));
assertTrue(fs.exists(testDir));
assertFalse(fs.isFile(testDir));
assertTrue(fs.mkdirs(testDir));
assertTrue(fs.exists(testDir));
assertFalse(fs.isFile(testDir));
Path parentDir = testDir.getParent();
assertTrue(fs.exists(parentDir));
assertFalse(fs.isFile(parentDir));
Path grandparentDir = parentDir.getParent();
assertTrue(fs.exists(grandparentDir));
assertFalse(fs.isFile(grandparentDir));
}
public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
Path testDir = path("/test/hadoop");
assertFalse(fs.exists(testDir));
assertTrue(fs.mkdirs(testDir));
assertTrue(fs.exists(testDir));
createFile(path("/test/hadoop/file"));
Path testSubDir = path("/test/hadoop/file/subdir");
try {
fs.mkdirs(testSubDir);
fail("Should throw IOException.");
} catch (IOException e) {
// expected
}
assertFalse(fs.exists(testSubDir));
Path testDeepSubDir = path("/test/hadoop/file/deep/sub/dir");
try {
fs.mkdirs(testDeepSubDir);
fail("Should throw IOException.");
} catch (IOException e) {
// expected
}
assertFalse(fs.exists(testDeepSubDir));
}
public void testMkdirsWithUmask() throws Exception {
if (fs.getScheme().equals("s3") || fs.getScheme().equals("s3n")) {
// skip permission tests for S3FileSystem until HDFS-1333 is fixed.
return;
}
Configuration conf = fs.getConf();
String oldUmask = conf.get(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY);
try {
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, TEST_UMASK);
final Path dir = new Path("/test/newDir");
assertTrue(fs.mkdirs(dir, new FsPermission((short)0777)));
FileStatus status = fs.getFileStatus(dir);
assertTrue(status.isDirectory());
assertEquals((short)0715, status.getPermission().toShort());
} finally {
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, oldUmask);
}
}
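  // Umask arithmetic behind the assertion above: the requested mode 0777
  // masked by TEST_UMASK 062 gives 0777 & ~0062 = 0715; each bit set in the
  // umask is cleared from the requested permission.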
public void testGetFileStatusThrowsExceptionForNonExistentFile()
throws Exception {
try {
fs.getFileStatus(path("/test/hadoop/file"));
fail("Should throw FileNotFoundException");
} catch (FileNotFoundException e) {
// expected
}
}
public void testListStatusThrowsExceptionForNonExistentFile() throws Exception {
try {
fs.listStatus(path("/test/hadoop/file"));
fail("Should throw FileNotFoundException");
} catch (FileNotFoundException fnfe) {
// expected
}
}
public void testListStatus() throws Exception {
Path[] testDirs = { path("/test/hadoop/a"),
path("/test/hadoop/b"),
path("/test/hadoop/c/1"), };
assertFalse(fs.exists(testDirs[0]));
for (Path path : testDirs) {
assertTrue(fs.mkdirs(path));
}
FileStatus[] paths = fs.listStatus(path("/test"));
assertEquals(1, paths.length);
assertEquals(path("/test/hadoop"), paths[0].getPath());
paths = fs.listStatus(path("/test/hadoop"));
assertEquals(3, paths.length);
assertEquals(path("/test/hadoop/a"), paths[0].getPath());
assertEquals(path("/test/hadoop/b"), paths[1].getPath());
assertEquals(path("/test/hadoop/c"), paths[2].getPath());
paths = fs.listStatus(path("/test/hadoop/a"));
assertEquals(0, paths.length);
}
public void testWriteReadAndDeleteEmptyFile() throws Exception {
writeReadAndDelete(0);
}
public void testWriteReadAndDeleteHalfABlock() throws Exception {
writeReadAndDelete(getBlockSize() / 2);
}
public void testWriteReadAndDeleteOneBlock() throws Exception {
writeReadAndDelete(getBlockSize());
}
public void testWriteReadAndDeleteOneAndAHalfBlocks() throws Exception {
writeReadAndDelete(getBlockSize() + (getBlockSize() / 2));
}
public void testWriteReadAndDeleteTwoBlocks() throws Exception {
writeReadAndDelete(getBlockSize() * 2);
}
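  // The sizes above deliberately straddle block boundaries (0, B/2, B, 3B/2
  // and 2B for B = getBlockSize()) to exercise empty, partial-block and
  // multi-block read/write paths.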
/**
* Write a dataset, read it back in and verify that they match.
* Afterwards, the file is deleted.
* @param len length of data
* @throws IOException on IO failures
*/
protected void writeReadAndDelete(int len) throws IOException {
Path path = path("/test/hadoop/file");
writeAndRead(path, data, len, false, true);
}
public void testOverwrite() throws IOException {
Path path = path("/test/hadoop/file");
fs.mkdirs(path.getParent());
createFile(path);
assertTrue("Exists", fs.exists(path));
assertEquals("Length", data.length, fs.getFileStatus(path).getLen());
try {
fs.create(path, false).close();
fail("Should throw IOException.");
} catch (IOException e) {
// Expected
}
FSDataOutputStream out = fs.create(path, true);
out.write(data, 0, data.length);
out.close();
assertTrue("Exists", fs.exists(path));
assertEquals("Length", data.length, fs.getFileStatus(path).getLen());
}
public void testWriteInNonExistentDirectory() throws IOException {
Path path = path("/test/hadoop/file");
assertFalse("Parent exists", fs.exists(path.getParent()));
createFile(path);
assertTrue("Exists", fs.exists(path));
assertEquals("Length", data.length, fs.getFileStatus(path).getLen());
assertTrue("Parent exists", fs.exists(path.getParent()));
}
public void testDeleteNonExistentFile() throws IOException {
Path path = path("/test/hadoop/file");
assertFalse("Path exists: " + path, fs.exists(path));
assertFalse("No deletion", fs.delete(path, true));
}
public void testDeleteRecursively() throws IOException {
Path dir = path("/test/hadoop");
Path file = path("/test/hadoop/file");
Path subdir = path("/test/hadoop/subdir");
createFile(file);
assertTrue("Created subdir", fs.mkdirs(subdir));
assertTrue("File exists", fs.exists(file));
assertTrue("Dir exists", fs.exists(dir));
assertTrue("Subdir exists", fs.exists(subdir));
try {
fs.delete(dir, false);
fail("Should throw IOException.");
} catch (IOException e) {
// expected
}
assertTrue("File still exists", fs.exists(file));
assertTrue("Dir still exists", fs.exists(dir));
assertTrue("Subdir still exists", fs.exists(subdir));
assertTrue("Deleted", fs.delete(dir, true));
assertFalse("File doesn't exist", fs.exists(file));
assertFalse("Dir doesn't exist", fs.exists(dir));
assertFalse("Subdir doesn't exist", fs.exists(subdir));
}
public void testDeleteEmptyDirectory() throws IOException {
Path dir = path("/test/hadoop");
assertTrue(fs.mkdirs(dir));
assertTrue("Dir exists", fs.exists(dir));
assertTrue("Deleted", fs.delete(dir, false));
assertFalse("Dir doesn't exist", fs.exists(dir));
}
public void testRenameNonExistentPath() throws Exception {
if (!renameSupported()) return;
Path src = path("/test/hadoop/path");
Path dst = path("/test/new/newpath");
rename(src, dst, false, false, false);
}
public void testRenameFileMoveToNonExistentDirectory() throws Exception {
if (!renameSupported()) return;
Path src = path("/test/hadoop/file");
createFile(src);
Path dst = path("/test/new/newfile");
rename(src, dst, false, true, false);
}
public void testRenameFileMoveToExistingDirectory() throws Exception {
if (!renameSupported()) return;
Path src = path("/test/hadoop/file");
createFile(src);
Path dst = path("/test/new/newfile");
fs.mkdirs(dst.getParent());
rename(src, dst, true, false, true);
}
public void testRenameFileAsExistingFile() throws Exception {
if (!renameSupported()) return;
Path src = path("/test/hadoop/file");
createFile(src);
Path dst = path("/test/new/newfile");
createFile(dst);
rename(src, dst, false, true, true);
}
public void testRenameFileAsExistingDirectory() throws Exception {
if (!renameSupported()) return;
Path src = path("/test/hadoop/file");
createFile(src);
Path dst = path("/test/new/newdir");
fs.mkdirs(dst);
rename(src, dst, true, false, true);
assertTrue("Destination changed",
fs.exists(path("/test/new/newdir/file")));
}
public void testRenameDirectoryMoveToNonExistentDirectory()
throws Exception {
if (!renameSupported()) return;
Path src = path("/test/hadoop/dir");
fs.mkdirs(src);
Path dst = path("/test/new/newdir");
rename(src, dst, false, true, false);
}
public void testRenameDirectoryMoveToExistingDirectory() throws Exception {
if (!renameSupported()) return;
Path src = path("/test/hadoop/dir");
fs.mkdirs(src);
createFile(path("/test/hadoop/dir/file1"));
createFile(path("/test/hadoop/dir/subdir/file2"));
Path dst = path("/test/new/newdir");
fs.mkdirs(dst.getParent());
rename(src, dst, true, false, true);
assertFalse("Nested file1 exists",
fs.exists(path("/test/hadoop/dir/file1")));
assertFalse("Nested file2 exists",
fs.exists(path("/test/hadoop/dir/subdir/file2")));
assertTrue("Renamed nested file1 exists",
fs.exists(path("/test/new/newdir/file1")));
assertTrue("Renamed nested exists",
fs.exists(path("/test/new/newdir/subdir/file2")));
}
public void testRenameDirectoryAsExistingFile() throws Exception {
if (!renameSupported()) return;
Path src = path("/test/hadoop/dir");
fs.mkdirs(src);
Path dst = path("/test/new/newfile");
createFile(dst);
rename(src, dst, false, true, true);
}
public void testRenameDirectoryAsExistingDirectory() throws Exception {
if (!renameSupported()) return;
Path src = path("/test/hadoop/dir");
fs.mkdirs(src);
createFile(path("/test/hadoop/dir/file1"));
createFile(path("/test/hadoop/dir/subdir/file2"));
Path dst = path("/test/new/newdir");
fs.mkdirs(dst);
rename(src, dst, true, false, true);
assertTrue("Destination changed",
fs.exists(path("/test/new/newdir/dir")));
assertFalse("Nested file1 exists",
fs.exists(path("/test/hadoop/dir/file1")));
assertFalse("Nested file2 exists",
fs.exists(path("/test/hadoop/dir/subdir/file2")));
assertTrue("Renamed nested file1 exists",
fs.exists(path("/test/new/newdir/dir/file1")));
assertTrue("Renamed nested exists",
fs.exists(path("/test/new/newdir/dir/subdir/file2")));
}
public void testInputStreamClosedTwice() throws IOException {
//HADOOP-4760 according to Closeable#close() closing already-closed
//streams should have no effect.
Path src = path("/test/hadoop/file");
createFile(src);
FSDataInputStream in = fs.open(src);
in.close();
in.close();
}
public void testOutputStreamClosedTwice() throws IOException {
//HADOOP-4760 according to Closeable#close() closing already-closed
//streams should have no effect.
Path src = path("/test/hadoop/file");
FSDataOutputStream out = fs.create(src);
out.writeChar('H'); //write some data
out.close();
out.close();
}
protected Path path(String pathString) {
return new Path(pathString).makeQualified(fs);
}
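  // Sketch of what path() yields (assuming a local filesystem): the string
  // "/test/hadoop" qualifies to something like file:/test/hadoop, with the
  // scheme and authority taken from fs via makeQualified().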
protected void createFile(Path path) throws IOException {
FSDataOutputStream out = fs.create(path);
out.write(data, 0, data.length);
out.close();
}
protected void rename(Path src, Path dst, boolean renameSucceeded,
boolean srcExists, boolean dstExists) throws IOException {
assertEquals("Rename result", renameSucceeded, fs.rename(src, dst));
assertEquals("Source exists", srcExists, fs.exists(src));
assertEquals("Destination exists" + dst, dstExists, fs.exists(dst));
}
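  // e.g. rename(src, dst, true, false, true) asserts that the rename
  // succeeded, the source is gone and the destination now exists.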
/**
* Verify that if you take an existing file and overwrite it, the new values
* get picked up.
* This is a test for the behavior of eventually consistent
* filesystems.
*
* @throws Exception on any failure
*/
public void testOverWriteAndRead() throws Exception {
int blockSize = getBlockSize();
byte[] filedata1 = dataset(blockSize * 2, 'A', 26);
byte[] filedata2 = dataset(blockSize * 2, 'a', 26);
Path path = path("/test/hadoop/file-overwrite");
writeAndRead(path, filedata1, blockSize, true, false);
writeAndRead(path, filedata2, blockSize, true, false);
writeAndRead(path, filedata1, blockSize * 2, true, false);
writeAndRead(path, filedata2, blockSize * 2, true, false);
writeAndRead(path, filedata1, blockSize, true, false);
writeAndRead(path, filedata2, blockSize * 2, true, false);
}
  /**
   * Write a file and read it in, validating the result. Optional flags control
   * whether file overwrite operations should be enabled, and whether the
   * file should be deleted afterwards.
   *
   * If there is a mismatch between what was written and what was expected,
   * a small range of bytes either side of the first error is logged to aid
   * diagnosis of the problem: whether it was a stale previous file or
   * corruption of the current file. This assumes that two sequential runs
   * to the same path use datasets with different character moduli.
   *
   * @param path path to write to
   * @param src the dataset to write
   * @param len length of data to write from the dataset
   * @param overwrite should the create option allow overwrites?
   * @param delete should the file be deleted afterwards? (with a verification
   * that it worked). Deletion is not attempted if an assertion has failed
   * earlier; it is not in a <code>finally{}</code> block.
   * @throws IOException IO problems
   */
protected void writeAndRead(Path path, byte[] src, int len,
boolean overwrite,
boolean delete) throws IOException {
assertTrue("Not enough data in source array to write " + len + " bytes",
src.length >= len);
fs.mkdirs(path.getParent());
FSDataOutputStream out = fs.create(path, overwrite,
fs.getConf().getInt("io.file.buffer.size",
4096),
(short) 1, getBlockSize());
out.write(src, 0, len);
out.close();
assertTrue("Exists", fs.exists(path));
assertEquals("Length", len, fs.getFileStatus(path).getLen());
FSDataInputStream in = fs.open(path);
byte[] buf = new byte[len];
in.readFully(0, buf);
in.close();
assertEquals(len, buf.length);
int errors = 0;
int first_error_byte = -1;
for (int i = 0; i < len; i++) {
if (src[i] != buf[i]) {
if (errors == 0) {
first_error_byte = i;
}
errors++;
}
}
if (errors > 0) {
String message = String.format(" %d errors in file of length %d",
errors, len);
LOG.warn(message);
// the range either side of the first error to print
// this is a purely arbitrary number, to aid user debugging
final int overlap = 10;
for (int i = Math.max(0, first_error_byte - overlap);
i < Math.min(first_error_byte + overlap, len);
i++) {
byte actual = buf[i];
byte expected = src[i];
String letter = toChar(actual);
String line = String.format("[%04d] %2x %s\n", i, actual, letter);
if (expected != actual) {
line = String.format("[%04d] %2x %s -expected %2x %s\n",
i,
actual,
letter,
expected,
toChar(expected));
}
LOG.warn(line);
}
fail(message);
}
if (delete) {
boolean deleted = fs.delete(path, false);
assertTrue("Deleted", deleted);
assertFalse("No longer exists", fs.exists(path));
}
}
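  // A mismatch at, say, byte 3 would be logged in the format built above:
  //   [0003] 44 D -expected 64 d
  // i.e. index, actual byte as hex and char, then the expected pair
  // (the values here are purely illustrative).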
/**
* Convert a byte to a character for printing. If the
   * byte value is below 0x20, and hence unprintable, the byte is
   * returned as a two-digit hex value.
* @param b byte
* @return the printable character string
*/
protected String toChar(byte b) {
if (b >= 0x20) {
return Character.toString((char) b);
} else {
return String.format("%02x", b);
}
}
/**
* Create a dataset for use in the tests; all data is in the range
* base to (base+modulo-1) inclusive
* @param len length of data
* @param base base of the data
* @param modulo the modulo
* @return the newly generated dataset
*/
protected byte[] dataset(int len, int base, int modulo) {
byte[] dataset = new byte[len];
for (int i = 0; i < len; i++) {
dataset[i] = (byte) (base + (i % modulo));
}
return dataset;
}
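  // For example, dataset(4, 'A', 26) yields {'A', 'B', 'C', 'D'}. The
  // overwrite tests above use bases 'A' and 'a' so that a stale read of the
  // previous dataset is distinguishable from corruption of the new one.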
}
| 20,263 | 31.842788 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.*;
import static org.mockito.Matchers.*;
import static org.mockito.Mockito.*;
import java.io.IOException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.net.URI;
import java.util.EnumSet;
import java.util.Iterator;
import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestFilterFileSystem {
private static final Log LOG = FileSystem.LOG;
private static final Configuration conf = new Configuration();
@BeforeClass
public static void setup() {
conf.set("fs.flfs.impl", FilterLocalFileSystem.class.getName());
conf.setBoolean("fs.flfs.impl.disable.cache", true);
conf.setBoolean("fs.file.impl.disable.cache", true);
}
  /**
   * FileSystem methods that FilterFileSystem is not required to override;
   * the reflection check in testFilterFileSystem() skips any method
   * declared here.
   */
  public static class DontCheck {
public BlockLocation[] getFileBlockLocations(Path p,
long start, long len) { return null; }
public FsServerDefaults getServerDefaults() { return null; }
public long getLength(Path f) { return 0; }
public FSDataOutputStream append(Path f) { return null; }
public FSDataOutputStream append(Path f, int bufferSize) { return null; }
public void rename(final Path src, final Path dst, final Rename... options) { }
public boolean exists(Path f) { return false; }
public boolean isDirectory(Path f) { return false; }
public boolean isFile(Path f) { return false; }
public boolean createNewFile(Path f) { return false; }
public FSDataOutputStream createNonRecursive(Path f,
boolean overwrite,
int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return null;
}
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return null;
}
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return null;
}
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
Progressable progress, ChecksumOpt checksumOpt) throws IOException {
return null;
}
public boolean mkdirs(Path f) { return false; }
public FSDataInputStream open(Path f) { return null; }
public FSDataOutputStream create(Path f) { return null; }
public FSDataOutputStream create(Path f, boolean overwrite) { return null; }
public FSDataOutputStream create(Path f, Progressable progress) {
return null;
}
public FSDataOutputStream create(Path f, short replication) {
return null;
}
public FSDataOutputStream create(Path f, short replication,
Progressable progress) {
return null;
}
public FSDataOutputStream create(Path f,
boolean overwrite,
int bufferSize) {
return null;
}
public FSDataOutputStream create(Path f,
boolean overwrite,
int bufferSize,
Progressable progress) {
return null;
}
public FSDataOutputStream create(Path f,
boolean overwrite,
int bufferSize,
short replication,
long blockSize) {
return null;
}
public FSDataOutputStream create(Path f,
boolean overwrite,
int bufferSize,
short replication,
long blockSize,
Progressable progress) {
return null;
}
public FSDataOutputStream create(Path f,
FsPermission permission,
boolean overwrite,
int bufferSize,
short replication,
long blockSize,
Progressable progress) {
return null;
}
public FSDataOutputStream create(Path f,
FsPermission permission,
EnumSet<CreateFlag> flags,
int bufferSize,
short replication,
long blockSize,
Progressable progress) throws IOException {
return null;
}
public FSDataOutputStream create(Path f,
FsPermission permission,
EnumSet<CreateFlag> flags,
int bufferSize,
short replication,
long blockSize,
Progressable progress,
ChecksumOpt checksumOpt) throws IOException {
return null;
}
public String getName() { return null; }
public boolean delete(Path f) { return false; }
    public short getReplication(Path src) { return 0; }
public void processDeleteOnExit() { }
public ContentSummary getContentSummary(Path f) { return null; }
public FsStatus getStatus() { return null; }
public FileStatus[] listStatus(Path f, PathFilter filter) { return null; }
public FileStatus[] listStatus(Path[] files) { return null; }
public FileStatus[] listStatus(Path[] files, PathFilter filter) { return null; }
public FileStatus[] globStatus(Path pathPattern) { return null; }
public FileStatus[] globStatus(Path pathPattern, PathFilter filter) {
return null;
}
public Iterator<LocatedFileStatus> listFiles(
final Path path, final boolean isRecursive) {
return null;
}
public Iterator<LocatedFileStatus> listLocatedStatus(Path f) {
return null;
}
public Iterator<LocatedFileStatus> listLocatedStatus(Path f,
final PathFilter filter) {
return null;
}
public void copyFromLocalFile(Path src, Path dst) { }
public void moveFromLocalFile(Path[] srcs, Path dst) { }
public void moveFromLocalFile(Path src, Path dst) { }
public void copyToLocalFile(Path src, Path dst) { }
public void copyToLocalFile(boolean delSrc, Path src, Path dst,
boolean useRawLocalFileSystem) { }
public void moveToLocalFile(Path src, Path dst) { }
public long getBlockSize(Path f) { return 0; }
public FSDataOutputStream primitiveCreate(final Path f,
final EnumSet<CreateFlag> createFlag,
CreateOpts... opts) { return null; }
public void primitiveMkdir(Path f, FsPermission absolutePermission,
boolean createParent) { }
public int getDefaultPort() { return 0; }
public String getCanonicalServiceName() { return null; }
public Token<?> getDelegationToken(String renewer) throws IOException {
return null;
}
public boolean deleteOnExit(Path f) throws IOException {
return false;
}
public boolean cancelDeleteOnExit(Path f) throws IOException {
return false;
}
public Token<?>[] addDelegationTokens(String renewer, Credentials creds)
throws IOException {
return null;
}
public String getScheme() {
return "dontcheck";
}
public Path fixRelativePart(Path p) { return null; }
}
@Test
public void testFilterFileSystem() throws Exception {
for (Method m : FileSystem.class.getDeclaredMethods()) {
if (Modifier.isStatic(m.getModifiers()))
continue;
if (Modifier.isPrivate(m.getModifiers()))
continue;
if (Modifier.isFinal(m.getModifiers()))
continue;
      try {
        DontCheck.class.getMethod(m.getName(), m.getParameterTypes());
        LOG.info("Skipping " + m);
      } catch (NoSuchMethodException exc) {
        LOG.info("Testing " + m);
        try {
          FilterFileSystem.class.getDeclaredMethod(m.getName(),
              m.getParameterTypes());
        } catch (NoSuchMethodException exc2) {
          LOG.error("FilterFileSystem doesn't implement " + m);
          throw exc2;
        }
      }
}
}
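  // In essence (a sketch; declares() is hypothetical shorthand for
  // getDeclaredMethod()/getMethod() not throwing): for every non-static,
  // non-private, non-final method m of FileSystem, the loop above requires
  //
  //   declares(DontCheck.class, m) || declares(FilterFileSystem.class, m)
  //
  // so a newly added FileSystem method must be either overridden by
  // FilterFileSystem or explicitly exempted in DontCheck.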
@Test
public void testFilterEmbedInit() throws Exception {
FileSystem mockFs = createMockFs(false); // no conf = need init
checkInit(new FilterFileSystem(mockFs), true);
}
@Test
public void testFilterEmbedNoInit() throws Exception {
FileSystem mockFs = createMockFs(true); // has conf = skip init
checkInit(new FilterFileSystem(mockFs), false);
}
@Test
public void testLocalEmbedInit() throws Exception {
FileSystem mockFs = createMockFs(false); // no conf = need init
checkInit(new LocalFileSystem(mockFs), true);
}
@Test
public void testLocalEmbedNoInit() throws Exception {
FileSystem mockFs = createMockFs(true); // has conf = skip init
checkInit(new LocalFileSystem(mockFs), false);
}
private FileSystem createMockFs(boolean useConf) {
FileSystem mockFs = mock(FileSystem.class);
when(mockFs.getUri()).thenReturn(URI.create("mock:/"));
when(mockFs.getConf()).thenReturn(useConf ? conf : null);
return mockFs;
}
@Test
public void testGetLocalFsSetsConfs() throws Exception {
LocalFileSystem lfs = FileSystem.getLocal(conf);
checkFsConf(lfs, conf, 2);
}
@Test
public void testGetFilterLocalFsSetsConfs() throws Exception {
FilterFileSystem flfs =
(FilterFileSystem) FileSystem.get(URI.create("flfs:/"), conf);
checkFsConf(flfs, conf, 3);
}
@Test
public void testInitLocalFsSetsConfs() throws Exception {
LocalFileSystem lfs = new LocalFileSystem();
checkFsConf(lfs, null, 2);
lfs.initialize(lfs.getUri(), conf);
checkFsConf(lfs, conf, 2);
}
@Test
public void testInitFilterFsSetsEmbedConf() throws Exception {
LocalFileSystem lfs = new LocalFileSystem();
checkFsConf(lfs, null, 2);
FilterFileSystem ffs = new FilterFileSystem(lfs);
assertEquals(lfs, ffs.getRawFileSystem());
checkFsConf(ffs, null, 3);
ffs.initialize(URI.create("filter:/"), conf);
checkFsConf(ffs, conf, 3);
}
@Test
public void testInitFilterLocalFsSetsEmbedConf() throws Exception {
FilterFileSystem flfs = new FilterLocalFileSystem();
assertEquals(LocalFileSystem.class, flfs.getRawFileSystem().getClass());
checkFsConf(flfs, null, 3);
flfs.initialize(URI.create("flfs:/"), conf);
checkFsConf(flfs, conf, 3);
}
@Test
public void testVerifyChecksumPassthru() {
FileSystem mockFs = mock(FileSystem.class);
FileSystem fs = new FilterFileSystem(mockFs);
fs.setVerifyChecksum(false);
verify(mockFs).setVerifyChecksum(eq(false));
reset(mockFs);
fs.setVerifyChecksum(true);
verify(mockFs).setVerifyChecksum(eq(true));
}
@Test
public void testWriteChecksumPassthru() {
FileSystem mockFs = mock(FileSystem.class);
FileSystem fs = new FilterFileSystem(mockFs);
fs.setWriteChecksum(false);
verify(mockFs).setWriteChecksum(eq(false));
reset(mockFs);
fs.setWriteChecksum(true);
verify(mockFs).setWriteChecksum(eq(true));
}
private void checkInit(FilterFileSystem fs, boolean expectInit)
throws Exception {
URI uri = URI.create("filter:/");
fs.initialize(uri, conf);
FileSystem embedFs = fs.getRawFileSystem();
if (expectInit) {
verify(embedFs, times(1)).initialize(eq(uri), eq(conf));
} else {
verify(embedFs, times(0)).initialize(any(URI.class), any(Configuration.class));
}
}
// check the given fs's conf, and all its filtered filesystems
private void checkFsConf(FileSystem fs, Configuration conf, int expectDepth) {
int depth = 0;
while (true) {
depth++;
assertFalse("depth "+depth+">"+expectDepth, depth > expectDepth);
assertEquals(conf, fs.getConf());
if (!(fs instanceof FilterFileSystem)) {
break;
}
fs = ((FilterFileSystem) fs).getRawFileSystem();
}
assertEquals(expectDepth, depth);
}
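  // e.g. FilterLocalFileSystem -> LocalFileSystem -> RawLocalFileSystem is
  // checked with expectDepth 3 above, while a bare LocalFileSystem (itself
  // a filter over RawLocalFileSystem) is checked with expectDepth 2.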
private static class FilterLocalFileSystem extends FilterFileSystem {
FilterLocalFileSystem() {
super(new LocalFileSystem());
}
}
}
| 12,922 | 33.832884 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFSMainOperationsLocalFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestFSMainOperationsLocalFileSystem extends FSMainOperationsBaseTest {
@Override
protected FileSystem createFileSystem() throws IOException {
return FileSystem.getLocal(new Configuration());
}
@Override
@Before
public void setUp() throws Exception {
super.setUp();
}
  static Path wd = null;
  @Override
  protected Path getDefaultWorkingDirectory() throws IOException {
    if (wd == null) {
      wd = FileSystem.getLocal(new Configuration()).getWorkingDirectory();
    }
    return wd;
  }
@Override
@After
public void tearDown() throws Exception {
super.tearDown();
}
@Test
@Override
public void testWDAbsolute() throws IOException {
Path absoluteDir = getTestRootPath(fSys, "test/existingDir");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
}
}
| 1,920 | 28.553846 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.EnumSet;
import java.util.NoSuchElementException;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.FsPermission;
import org.junit.After;
import org.junit.Assert;
import static org.junit.Assert.*;
import org.junit.Before;
import org.junit.Test;
import static org.apache.hadoop.fs.FileContextTestHelper.*;
import static org.apache.hadoop.fs.CreateFlag.*;
/**
* <p>
* A collection of tests for the {@link FileContext}.
* This test should be used for testing an instance of FileContext
 * that has been initialized to a specific default FileSystem such as
 * LocalFileSystem, HDFS, S3, etc.
* </p>
* <p>
* To test a given {@link FileSystem} implementation create a subclass of this
* test and override {@link #setUp()} to initialize the <code>fc</code>
* {@link FileContext} instance variable.
*
 * Since this is a JUnit 4 test you can also do a single setup before
* the start of any tests.
* E.g.
* @BeforeClass public static void clusterSetupAtBegining()
* @AfterClass public static void ClusterShutdownAtEnd()
* </p>
*/
public abstract class FileContextMainOperationsBaseTest {
private static String TEST_DIR_AAA2 = "test/hadoop2/aaa";
private static String TEST_DIR_AAA = "test/hadoop/aaa";
private static String TEST_DIR_AXA = "test/hadoop/axa";
private static String TEST_DIR_AXX = "test/hadoop/axx";
private static int numBlocks = 2;
public Path localFsRootPath;
protected final FileContextTestHelper fileContextTestHelper =
createFileContextHelper();
protected FileContextTestHelper createFileContextHelper() {
return new FileContextTestHelper();
}
protected static FileContext fc;
  private static final PathFilter DEFAULT_FILTER = new PathFilter() {
@Override
public boolean accept(final Path file) {
return true;
}
};
  // A test filter that accepts any path whose name contains an "x" or "X"
  private static final PathFilter TEST_X_FILTER = new PathFilter() {
    @Override
    public boolean accept(Path file) {
      return file.getName().contains("x") || file.getName().contains("X");
    }
  };
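  // e.g. TEST_X_FILTER accepts "axa" and "axx" but rejects "aaa" and "aaa2",
  // which the filter tests below rely on.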
private static final byte[] data = getFileData(numBlocks,
getDefaultBlockSize());
@Before
public void setUp() throws Exception {
File testBuildData = new File(System.getProperty("test.build.data",
"build/test/data"), RandomStringUtils.randomAlphanumeric(10));
Path rootPath = new Path(testBuildData.getAbsolutePath(),
"root-uri");
localFsRootPath = rootPath.makeQualified(LocalFileSystem.NAME, null);
fc.mkdir(getTestRootPath(fc, "test"), FileContext.DEFAULT_PERM, true);
}
@After
public void tearDown() throws Exception {
boolean del = fc.delete(new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc), new Path("test")), true);
assertTrue(del);
fc.delete(localFsRootPath, true);
}
protected Path getDefaultWorkingDirectory() throws IOException {
return getTestRootPath(fc,
"/user/" + System.getProperty("user.name")).makeQualified(
fc.getDefaultFileSystem().getUri(), fc.getWorkingDirectory());
}
protected boolean renameSupported() {
return true;
}
protected IOException unwrapException(IOException e) {
return e;
}
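  // Illustrative sketch (hypothetical): a local-filesystem subclass can
  // initialize the shared fc once per class, as the class javadoc suggests:
  //
  //   @BeforeClass
  //   public static void clusterSetupAtBegining() throws Exception {
  //     fc = FileContext.getLocalFSFileContext();
  //   }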
@Test
public void testFsStatus() throws Exception {
FsStatus fsStatus = fc.getFsStatus(null);
Assert.assertNotNull(fsStatus);
//used, free and capacity are non-negative longs
Assert.assertTrue(fsStatus.getUsed() >= 0);
Assert.assertTrue(fsStatus.getRemaining() >= 0);
Assert.assertTrue(fsStatus.getCapacity() >= 0);
}
@Test
public void testWorkingDirectory() throws Exception {
// First we cd to our test root
Path workDir = new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc), new Path("test"));
fc.setWorkingDirectory(workDir);
Assert.assertEquals(workDir, fc.getWorkingDirectory());
fc.setWorkingDirectory(new Path("."));
Assert.assertEquals(workDir, fc.getWorkingDirectory());
fc.setWorkingDirectory(new Path(".."));
Assert.assertEquals(workDir.getParent(), fc.getWorkingDirectory());
// cd using a relative path
// Go back to our test root
workDir = new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc), new Path("test"));
fc.setWorkingDirectory(workDir);
Assert.assertEquals(workDir, fc.getWorkingDirectory());
Path relativeDir = new Path("existingDir1");
Path absoluteDir = new Path(workDir,"existingDir1");
fc.mkdir(absoluteDir, FileContext.DEFAULT_PERM, true);
fc.setWorkingDirectory(relativeDir);
Assert.assertEquals(absoluteDir, fc.getWorkingDirectory());
// cd using a absolute path
absoluteDir = getTestRootPath(fc, "test/existingDir2");
fc.mkdir(absoluteDir, FileContext.DEFAULT_PERM, true);
fc.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir, fc.getWorkingDirectory());
// Now open a file relative to the wd we just set above.
Path absolutePath = new Path(absoluteDir, "foo");
fc.create(absolutePath, EnumSet.of(CREATE)).close();
fc.open(new Path("foo")).close();
// Now mkdir relative to the dir we cd'ed to
fc.mkdir(new Path("newDir"), FileContext.DEFAULT_PERM, true);
Assert.assertTrue(isDir(fc, new Path(absoluteDir, "newDir")));
absoluteDir = getTestRootPath(fc, "nonexistingPath");
try {
fc.setWorkingDirectory(absoluteDir);
Assert.fail("cd to non existing dir should have failed");
} catch (Exception e) {
// Exception as expected
}
// Try a URI
absoluteDir = new Path(localFsRootPath, "existingDir");
fc.mkdir(absoluteDir, FileContext.DEFAULT_PERM, true);
fc.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir, fc.getWorkingDirectory());
Path aRegularFile = new Path("aRegularFile");
createFile(aRegularFile);
try {
fc.setWorkingDirectory(aRegularFile);
fail("An IOException expected.");
} catch (IOException ioe) {
// okay
}
}
@Test
public void testMkdirs() throws Exception {
Path testDir = getTestRootPath(fc, "test/hadoop");
Assert.assertFalse(exists(fc, testDir));
Assert.assertFalse(isFile(fc, testDir));
fc.mkdir(testDir, FsPermission.getDefault(), true);
Assert.assertTrue(exists(fc, testDir));
Assert.assertFalse(isFile(fc, testDir));
fc.mkdir(testDir, FsPermission.getDefault(), true);
Assert.assertTrue(exists(fc, testDir));
Assert.assertFalse(isFile(fc, testDir));
Path parentDir = testDir.getParent();
Assert.assertTrue(exists(fc, parentDir));
Assert.assertFalse(isFile(fc, parentDir));
Path grandparentDir = parentDir.getParent();
Assert.assertTrue(exists(fc, grandparentDir));
Assert.assertFalse(isFile(fc, grandparentDir));
}
@Test
public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
Path testDir = getTestRootPath(fc, "test/hadoop");
Assert.assertFalse(exists(fc, testDir));
fc.mkdir(testDir, FsPermission.getDefault(), true);
Assert.assertTrue(exists(fc, testDir));
createFile(getTestRootPath(fc, "test/hadoop/file"));
Path testSubDir = getTestRootPath(fc, "test/hadoop/file/subdir");
try {
fc.mkdir(testSubDir, FsPermission.getDefault(), true);
Assert.fail("Should throw IOException.");
} catch (IOException e) {
// expected
}
Assert.assertFalse(exists(fc, testSubDir));
Path testDeepSubDir = getTestRootPath(fc, "test/hadoop/file/deep/sub/dir");
try {
fc.mkdir(testDeepSubDir, FsPermission.getDefault(), true);
Assert.fail("Should throw IOException.");
} catch (IOException e) {
// expected
}
Assert.assertFalse(exists(fc, testDeepSubDir));
}
@Test
public void testGetFileStatusThrowsExceptionForNonExistentFile()
throws Exception {
try {
fc.getFileStatus(getTestRootPath(fc, "test/hadoop/file"));
Assert.fail("Should throw FileNotFoundException");
} catch (FileNotFoundException e) {
// expected
}
}
  @Test
  public void testListStatusThrowsExceptionForNonExistentFile()
throws Exception {
try {
fc.listStatus(getTestRootPath(fc, "test/hadoop/file"));
Assert.fail("Should throw FileNotFoundException");
} catch (FileNotFoundException fnfe) {
// expected
}
}
@Test
public void testListStatus() throws Exception {
Path[] testDirs = {
getTestRootPath(fc, "test/hadoop/a"),
getTestRootPath(fc, "test/hadoop/b"),
getTestRootPath(fc, "test/hadoop/c/1"), };
Assert.assertFalse(exists(fc, testDirs[0]));
for (Path path : testDirs) {
fc.mkdir(path, FsPermission.getDefault(), true);
}
// test listStatus that returns an array
FileStatus[] paths = fc.util().listStatus(getTestRootPath(fc, "test"));
Assert.assertEquals(1, paths.length);
Assert.assertEquals(getTestRootPath(fc, "test/hadoop"), paths[0].getPath());
paths = fc.util().listStatus(getTestRootPath(fc, "test/hadoop"));
Assert.assertEquals(3, paths.length);
Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/a"),
paths));
Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/b"),
paths));
Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/c"),
paths));
paths = fc.util().listStatus(getTestRootPath(fc, "test/hadoop/a"));
Assert.assertEquals(0, paths.length);
// test listStatus that returns an iterator
RemoteIterator<FileStatus> pathsIterator =
fc.listStatus(getTestRootPath(fc, "test"));
Assert.assertEquals(getTestRootPath(fc, "test/hadoop"),
pathsIterator.next().getPath());
Assert.assertFalse(pathsIterator.hasNext());
pathsIterator = fc.listStatus(getTestRootPath(fc, "test/hadoop"));
FileStatus[] subdirs = new FileStatus[3];
int i=0;
while(i<3 && pathsIterator.hasNext()) {
subdirs[i++] = pathsIterator.next();
}
Assert.assertFalse(pathsIterator.hasNext());
    Assert.assertEquals(3, i);
Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/a"),
subdirs));
Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/b"),
subdirs));
Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/c"),
subdirs));
pathsIterator = fc.listStatus(getTestRootPath(fc, "test/hadoop/a"));
Assert.assertFalse(pathsIterator.hasNext());
}
@Test
public void testListStatusFilterWithNoMatches() throws Exception {
Path[] testDirs = {
getTestRootPath(fc, TEST_DIR_AAA2),
getTestRootPath(fc, TEST_DIR_AAA),
getTestRootPath(fc, TEST_DIR_AXA),
getTestRootPath(fc, TEST_DIR_AXX), };
if (exists(fc, testDirs[0]) == false) {
for (Path path : testDirs) {
fc.mkdir(path, FsPermission.getDefault(), true);
}
}
// listStatus with filters returns empty correctly
FileStatus[] filteredPaths = fc.util().listStatus(
getTestRootPath(fc, "test"), TEST_X_FILTER);
Assert.assertEquals(0,filteredPaths.length);
}
  @Test
  public void testListStatusFilterWithSomeMatches() throws Exception {
Path[] testDirs = {
getTestRootPath(fc, TEST_DIR_AAA),
getTestRootPath(fc, TEST_DIR_AXA),
getTestRootPath(fc, TEST_DIR_AXX),
getTestRootPath(fc, TEST_DIR_AAA2), };
if (exists(fc, testDirs[0]) == false) {
for (Path path : testDirs) {
fc.mkdir(path, FsPermission.getDefault(), true);
}
}
// should return 2 paths ("/test/hadoop/axa" and "/test/hadoop/axx")
FileStatus[] filteredPaths = fc.util()
.listStatus(getTestRootPath(fc, "test/hadoop"),
TEST_X_FILTER);
Assert.assertEquals(2,filteredPaths.length);
Assert.assertTrue(containsPath(getTestRootPath(fc,
TEST_DIR_AXA), filteredPaths));
Assert.assertTrue(containsPath(getTestRootPath(fc,
TEST_DIR_AXX), filteredPaths));
}
@Test
public void testGlobStatusNonExistentFile() throws Exception {
FileStatus[] paths = fc.util().globStatus(
getTestRootPath(fc, "test/hadoopfsdf"));
Assert.assertNull(paths);
paths = fc.util().globStatus(
getTestRootPath(fc, "test/hadoopfsdf/?"));
Assert.assertEquals(0, paths.length);
paths = fc.util().globStatus(
getTestRootPath(fc, "test/hadoopfsdf/xyz*/?"));
Assert.assertEquals(0, paths.length);
}
@Test
public void testGlobStatusWithNoMatchesInPath() throws Exception {
Path[] testDirs = {
getTestRootPath(fc, TEST_DIR_AAA),
getTestRootPath(fc, TEST_DIR_AXA),
getTestRootPath(fc, TEST_DIR_AXX),
getTestRootPath(fc, TEST_DIR_AAA2), };
if (exists(fc, testDirs[0]) == false) {
for (Path path : testDirs) {
fc.mkdir(path, FsPermission.getDefault(), true);
}
}
// should return nothing
FileStatus[] paths = fc.util().globStatus(
getTestRootPath(fc, "test/hadoop/?"));
Assert.assertEquals(0, paths.length);
}
@Test
public void testGlobStatusSomeMatchesInDirectories() throws Exception {
Path[] testDirs = {
getTestRootPath(fc, TEST_DIR_AAA),
getTestRootPath(fc, TEST_DIR_AXA),
getTestRootPath(fc, TEST_DIR_AXX),
getTestRootPath(fc, TEST_DIR_AAA2), };
if (exists(fc, testDirs[0]) == false) {
for (Path path : testDirs) {
fc.mkdir(path, FsPermission.getDefault(), true);
}
}
// Should return two items ("/test/hadoop" and "/test/hadoop2")
FileStatus[] paths = fc.util().globStatus(
getTestRootPath(fc, "test/hadoop*"));
Assert.assertEquals(2, paths.length);
Assert.assertTrue(containsPath(getTestRootPath(fc,
"test/hadoop"), paths));
Assert.assertTrue(containsPath(getTestRootPath(fc,
"test/hadoop2"), paths));
}
@Test
public void testGlobStatusWithMultipleWildCardMatches() throws Exception {
Path[] testDirs = {
getTestRootPath(fc, TEST_DIR_AAA),
getTestRootPath(fc, TEST_DIR_AXA),
getTestRootPath(fc, TEST_DIR_AXX),
getTestRootPath(fc, TEST_DIR_AAA2), };
if (exists(fc, testDirs[0]) == false) {
for (Path path : testDirs) {
fc.mkdir(path, FsPermission.getDefault(), true);
}
}
//Should return all 4 items ("/test/hadoop/aaa", "/test/hadoop/axa"
//"/test/hadoop/axx", and "/test/hadoop2/axx")
FileStatus[] paths = fc.util().globStatus(
getTestRootPath(fc, "test/hadoop*/*"));
Assert.assertEquals(4, paths.length);
Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AAA), paths));
Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), paths));
Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), paths));
Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AAA2), paths));
}
@Test
public void testGlobStatusWithMultipleMatchesOfSingleChar() throws Exception {
Path[] testDirs = {
getTestRootPath(fc, TEST_DIR_AAA),
getTestRootPath(fc, TEST_DIR_AXA),
getTestRootPath(fc, TEST_DIR_AXX),
getTestRootPath(fc, TEST_DIR_AAA2), };
if (exists(fc, testDirs[0]) == false) {
for (Path path : testDirs) {
fc.mkdir(path, FsPermission.getDefault(), true);
}
}
//Should return only 2 items ("/test/hadoop/axa", "/test/hadoop/axx")
FileStatus[] paths = fc.util().globStatus(
getTestRootPath(fc, "test/hadoop/ax?"));
Assert.assertEquals(2, paths.length);
Assert.assertTrue(containsPath(getTestRootPath(fc,
TEST_DIR_AXA), paths));
Assert.assertTrue(containsPath(getTestRootPath(fc,
TEST_DIR_AXX), paths));
}
@Test
public void testGlobStatusFilterWithEmptyPathResults() throws Exception {
Path[] testDirs = {
getTestRootPath(fc, TEST_DIR_AAA),
getTestRootPath(fc, TEST_DIR_AXA),
getTestRootPath(fc, TEST_DIR_AXX),
getTestRootPath(fc, TEST_DIR_AXX), };
if (exists(fc, testDirs[0]) == false) {
for (Path path : testDirs) {
fc.mkdir(path, FsPermission.getDefault(), true);
}
}
//This should return an empty set
FileStatus[] filteredPaths = fc.util().globStatus(
getTestRootPath(fc, "test/hadoop/?"),
DEFAULT_FILTER);
Assert.assertEquals(0,filteredPaths.length);
}
@Test
public void testGlobStatusFilterWithSomePathMatchesAndTrivialFilter()
throws Exception {
Path[] testDirs = {
getTestRootPath(fc, TEST_DIR_AAA),
getTestRootPath(fc, TEST_DIR_AXA),
getTestRootPath(fc, TEST_DIR_AXX),
getTestRootPath(fc, TEST_DIR_AXX), };
if (exists(fc, testDirs[0]) == false) {
for (Path path : testDirs) {
fc.mkdir(path, FsPermission.getDefault(), true);
}
}
//This should return all three (aaa, axa, axx)
FileStatus[] filteredPaths = fc.util().globStatus(
getTestRootPath(fc, "test/hadoop/*"),
DEFAULT_FILTER);
Assert.assertEquals(3, filteredPaths.length);
Assert.assertTrue(containsPath(getTestRootPath(fc,
TEST_DIR_AAA), filteredPaths));
Assert.assertTrue(containsPath(getTestRootPath(fc,
TEST_DIR_AXA), filteredPaths));
Assert.assertTrue(containsPath(getTestRootPath(fc,
TEST_DIR_AXX), filteredPaths));
}
@Test
public void testGlobStatusFilterWithMultipleWildCardMatchesAndTrivialFilter()
throws Exception {
Path[] testDirs = {
getTestRootPath(fc, TEST_DIR_AAA),
getTestRootPath(fc, TEST_DIR_AXA),
getTestRootPath(fc, TEST_DIR_AXX),
getTestRootPath(fc, TEST_DIR_AXX), };
if (exists(fc, testDirs[0]) == false) {
for (Path path : testDirs) {
fc.mkdir(path, FsPermission.getDefault(), true);
}
}
//This should return all three (aaa, axa, axx)
FileStatus[] filteredPaths = fc.util().globStatus(
getTestRootPath(fc, "test/hadoop/a??"),
DEFAULT_FILTER);
Assert.assertEquals(3, filteredPaths.length);
Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AAA),
filteredPaths));
Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA),
filteredPaths));
Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX),
filteredPaths));
}
@Test
public void testGlobStatusFilterWithMultiplePathMatchesAndNonTrivialFilter()
throws Exception {
Path[] testDirs = {
getTestRootPath(fc, TEST_DIR_AAA),
getTestRootPath(fc, TEST_DIR_AXA),
getTestRootPath(fc, TEST_DIR_AXX),
getTestRootPath(fc, TEST_DIR_AXX), };
if (exists(fc, testDirs[0]) == false) {
for (Path path : testDirs) {
fc.mkdir(path, FsPermission.getDefault(), true);
}
}
//This should return two (axa, axx)
FileStatus[] filteredPaths = fc.util().globStatus(
getTestRootPath(fc, "test/hadoop/*"),
TEST_X_FILTER);
Assert.assertEquals(2, filteredPaths.length);
Assert.assertTrue(containsPath(getTestRootPath(fc,
TEST_DIR_AXA), filteredPaths));
Assert.assertTrue(containsPath(getTestRootPath(fc,
TEST_DIR_AXX), filteredPaths));
}
@Test
public void testGlobStatusFilterWithNoMatchingPathsAndNonTrivialFilter()
throws Exception {
Path[] testDirs = {
getTestRootPath(fc, TEST_DIR_AAA),
getTestRootPath(fc, TEST_DIR_AXA),
getTestRootPath(fc, TEST_DIR_AXX),
getTestRootPath(fc, TEST_DIR_AXX), };
if (exists(fc, testDirs[0]) == false) {
for (Path path : testDirs) {
fc.mkdir(path, FsPermission.getDefault(), true);
}
}
//This should return an empty set
FileStatus[] filteredPaths = fc.util().globStatus(
getTestRootPath(fc, "test/hadoop/?"),
TEST_X_FILTER);
Assert.assertEquals(0,filteredPaths.length);
}
@Test
public void testGlobStatusFilterWithMultiplePathWildcardsAndNonTrivialFilter()
throws Exception {
Path[] testDirs = {
getTestRootPath(fc, TEST_DIR_AAA),
getTestRootPath(fc, TEST_DIR_AXA),
getTestRootPath(fc, TEST_DIR_AXX),
getTestRootPath(fc, TEST_DIR_AXX), };
if (exists(fc, testDirs[0]) == false) {
for (Path path : testDirs) {
fc.mkdir(path, FsPermission.getDefault(), true);
}
}
//This should return two (axa, axx)
FileStatus[] filteredPaths = fc.util().globStatus(
getTestRootPath(fc, "test/hadoop/a??"),
TEST_X_FILTER);
Assert.assertEquals(2, filteredPaths.length);
Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA),
filteredPaths));
Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX),
filteredPaths));
}
@Test
public void testWriteReadAndDeleteEmptyFile() throws Exception {
writeReadAndDelete(0);
}
@Test
public void testWriteReadAndDeleteHalfABlock() throws Exception {
writeReadAndDelete(getDefaultBlockSize() / 2);
}
@Test
public void testWriteReadAndDeleteOneBlock() throws Exception {
writeReadAndDelete(getDefaultBlockSize());
}
@Test
public void testWriteReadAndDeleteOneAndAHalfBlocks() throws Exception {
int blockSize = getDefaultBlockSize();
writeReadAndDelete(blockSize + (blockSize / 2));
}
@Test
public void testWriteReadAndDeleteTwoBlocks() throws Exception {
writeReadAndDelete(getDefaultBlockSize() * 2);
}
private void writeReadAndDelete(int len) throws IOException {
Path path = getTestRootPath(fc, "test/hadoop/file");
fc.mkdir(path.getParent(), FsPermission.getDefault(), true);
FSDataOutputStream out = fc.create(path, EnumSet.of(CREATE),
CreateOpts.repFac((short) 1), CreateOpts
.blockSize(getDefaultBlockSize()));
out.write(data, 0, len);
out.close();
Assert.assertTrue("Exists", exists(fc, path));
Assert.assertEquals("Length", len, fc.getFileStatus(path).getLen());
FSDataInputStream in = fc.open(path);
byte[] buf = new byte[len];
in.readFully(0, buf);
in.close();
Assert.assertEquals(len, buf.length);
for (int i = 0; i < buf.length; i++) {
Assert.assertEquals("Position " + i, data[i], buf[i]);
}
Assert.assertTrue("Deleted", fc.delete(path, false));
Assert.assertFalse("No longer exists", exists(fc, path));
}
@Test(expected=HadoopIllegalArgumentException.class)
public void testNullCreateFlag() throws IOException {
Path p = getTestRootPath(fc, "test/file");
fc.create(p, null);
Assert.fail("Excepted exception not thrown");
}
@Test(expected=HadoopIllegalArgumentException.class)
public void testEmptyCreateFlag() throws IOException {
Path p = getTestRootPath(fc, "test/file");
fc.create(p, EnumSet.noneOf(CreateFlag.class));
Assert.fail("Excepted exception not thrown");
}
@Test(expected=FileAlreadyExistsException.class)
public void testCreateFlagCreateExistingFile() throws IOException {
Path p = getTestRootPath(fc, "test/testCreateFlagCreateExistingFile");
createFile(p);
fc.create(p, EnumSet.of(CREATE));
Assert.fail("Excepted exception not thrown");
}
@Test(expected=FileNotFoundException.class)
public void testCreateFlagOverwriteNonExistingFile() throws IOException {
Path p = getTestRootPath(fc, "test/testCreateFlagOverwriteNonExistingFile");
fc.create(p, EnumSet.of(OVERWRITE));
Assert.fail("Excepted exception not thrown");
}
@Test
public void testCreateFlagOverwriteExistingFile() throws IOException {
Path p = getTestRootPath(fc, "test/testCreateFlagOverwriteExistingFile");
createFile(p);
FSDataOutputStream out = fc.create(p, EnumSet.of(OVERWRITE));
writeData(fc, p, out, data, data.length);
}
@Test(expected=FileNotFoundException.class)
public void testCreateFlagAppendNonExistingFile() throws IOException {
Path p = getTestRootPath(fc, "test/testCreateFlagAppendNonExistingFile");
fc.create(p, EnumSet.of(APPEND));
Assert.fail("Excepted exception not thrown");
}
@Test
public void testCreateFlagAppendExistingFile() throws IOException {
Path p = getTestRootPath(fc, "test/testCreateFlagAppendExistingFile");
createFile(p);
FSDataOutputStream out = fc.create(p, EnumSet.of(APPEND));
writeData(fc, p, out, data, 2 * data.length);
}
@Test
public void testCreateFlagCreateAppendNonExistingFile() throws IOException {
Path p = getTestRootPath(fc, "test/testCreateFlagCreateAppendNonExistingFile");
FSDataOutputStream out = fc.create(p, EnumSet.of(CREATE, APPEND));
writeData(fc, p, out, data, data.length);
}
@Test
public void testCreateFlagCreateAppendExistingFile() throws IOException {
Path p = getTestRootPath(fc, "test/testCreateFlagCreateAppendExistingFile");
createFile(p);
FSDataOutputStream out = fc.create(p, EnumSet.of(CREATE, APPEND));
writeData(fc, p, out, data, 2*data.length);
}
@Test(expected=HadoopIllegalArgumentException.class)
public void testCreateFlagAppendOverwrite() throws IOException {
Path p = getTestRootPath(fc, "test/nonExistent");
fc.create(p, EnumSet.of(APPEND, OVERWRITE));
Assert.fail("Excepted exception not thrown");
}
@Test(expected=HadoopIllegalArgumentException.class)
public void testCreateFlagAppendCreateOverwrite() throws IOException {
Path p = getTestRootPath(fc, "test/nonExistent");
fc.create(p, EnumSet.of(CREATE, APPEND, OVERWRITE));
Assert.fail("Excepted exception not thrown");
}
private static void writeData(FileContext fc, Path p, FSDataOutputStream out,
byte[] data, long expectedLen) throws IOException {
out.write(data, 0, data.length);
out.close();
Assert.assertTrue("Exists", exists(fc, p));
Assert.assertEquals("Length", expectedLen, fc.getFileStatus(p).getLen());
}
@Test
public void testWriteInNonExistentDirectory() throws IOException {
Path path = getTestRootPath(fc, "test/hadoop/file");
Assert.assertFalse("Parent doesn't exist", exists(fc, path.getParent()));
createFile(path);
Assert.assertTrue("Exists", exists(fc, path));
Assert.assertEquals("Length", data.length, fc.getFileStatus(path).getLen());
Assert.assertTrue("Parent exists", exists(fc, path.getParent()));
}
@Test
public void testDeleteNonExistentFile() throws IOException {
Path path = getTestRootPath(fc, "test/hadoop/file");
Assert.assertFalse("Doesn't exist", exists(fc, path));
Assert.assertFalse("No deletion", fc.delete(path, true));
}
@Test
public void testDeleteRecursively() throws IOException {
Path dir = getTestRootPath(fc, "test/hadoop");
Path file = getTestRootPath(fc, "test/hadoop/file");
Path subdir = getTestRootPath(fc, "test/hadoop/subdir");
createFile(file);
fc.mkdir(subdir,FsPermission.getDefault(), true);
Assert.assertTrue("File exists", exists(fc, file));
Assert.assertTrue("Dir exists", exists(fc, dir));
Assert.assertTrue("Subdir exists", exists(fc, subdir));
try {
fc.delete(dir, false);
Assert.fail("Should throw IOException.");
} catch (IOException e) {
// expected
}
Assert.assertTrue("File still exists", exists(fc, file));
Assert.assertTrue("Dir still exists", exists(fc, dir));
Assert.assertTrue("Subdir still exists", exists(fc, subdir));
Assert.assertTrue("Deleted", fc.delete(dir, true));
Assert.assertFalse("File doesn't exist", exists(fc, file));
Assert.assertFalse("Dir doesn't exist", exists(fc, dir));
Assert.assertFalse("Subdir doesn't exist", exists(fc, subdir));
}
@Test
public void testDeleteEmptyDirectory() throws IOException {
Path dir = getTestRootPath(fc, "test/hadoop");
fc.mkdir(dir, FsPermission.getDefault(), true);
Assert.assertTrue("Dir exists", exists(fc, dir));
Assert.assertTrue("Deleted", fc.delete(dir, false));
Assert.assertFalse("Dir doesn't exist", exists(fc, dir));
}
@Test
public void testRenameNonExistentPath() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fc, "test/hadoop/nonExistent");
Path dst = getTestRootPath(fc, "test/new/newpath");
try {
rename(src, dst, false, false, false, Rename.NONE);
Assert.fail("Should throw FileNotFoundException");
} catch (IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
try {
rename(src, dst, false, false, false, Rename.OVERWRITE);
Assert.fail("Should throw FileNotFoundException");
} catch (IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
}
@Test
public void testRenameFileToNonExistentDirectory() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fc, "test/hadoop/file");
createFile(src);
Path dst = getTestRootPath(fc, "test/nonExistent/newfile");
try {
rename(src, dst, false, true, false, Rename.NONE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
try {
rename(src, dst, false, true, false, Rename.OVERWRITE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
}
@Test
public void testRenameFileToDestinationWithParentFile() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fc, "test/hadoop/file");
createFile(src);
Path dst = getTestRootPath(fc, "test/parentFile/newfile");
createFile(dst.getParent());
try {
rename(src, dst, false, true, false, Rename.NONE);
Assert.fail("Expected exception was not thrown");
    } catch (IOException e) {
      // expected
    }
try {
rename(src, dst, false, true, false, Rename.OVERWRITE);
Assert.fail("Expected exception was not thrown");
    } catch (IOException e) {
      // expected
    }
}
@Test
public void testRenameFileToExistingParent() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fc, "test/hadoop/file");
createFile(src);
Path dst = getTestRootPath(fc, "test/new/newfile");
fc.mkdir(dst.getParent(), FileContext.DEFAULT_PERM, true);
rename(src, dst, true, false, true, Rename.OVERWRITE);
}
@Test
public void testRenameFileToItself() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fc, "test/hadoop/file");
createFile(src);
try {
rename(src, src, false, true, false, Rename.NONE);
Assert.fail("Renamed file to itself");
} catch (IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Also fails with overwrite
try {
rename(src, src, false, true, false, Rename.OVERWRITE);
Assert.fail("Renamed file to itself");
} catch (IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
}
@Test
public void testRenameFileAsExistingFile() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fc, "test/hadoop/file");
createFile(src);
Path dst = getTestRootPath(fc, "test/new/existingFile");
createFile(dst);
// Fails without overwrite option
try {
rename(src, dst, false, true, false, Rename.NONE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Succeeds with overwrite option
rename(src, dst, true, false, true, Rename.OVERWRITE);
}
@Test
public void testRenameFileAsExistingDirectory() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fc, "test/hadoop/file");
createFile(src);
Path dst = getTestRootPath(fc, "test/new/existingDir");
fc.mkdir(dst, FileContext.DEFAULT_PERM, true);
// Fails without overwrite option
try {
rename(src, dst, false, false, true, Rename.NONE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
// Expected: a file cannot be renamed onto an existing directory
}
// File cannot be renamed as directory
try {
rename(src, dst, false, false, true, Rename.OVERWRITE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
// Expected: OVERWRITE does not allow renaming a file onto a directory
}
}
@Test
public void testRenameDirectoryToItself() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fc, "test/hadoop/dir");
fc.mkdir(src, FileContext.DEFAULT_PERM, true);
try {
rename(src, src, false, true, false, Rename.NONE);
Assert.fail("Renamed directory to itself");
} catch (IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Also fails with overwrite
try {
rename(src, src, false, true, false, Rename.OVERWRITE);
Assert.fail("Renamed directory to itself");
} catch (IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
}
@Test
public void testRenameDirectoryToNonExistentParent() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fc, "test/hadoop/dir");
fc.mkdir(src, FileContext.DEFAULT_PERM, true);
Path dst = getTestRootPath(fc, "test/nonExistent/newdir");
try {
rename(src, dst, false, true, false, Rename.NONE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
try {
rename(src, dst, false, true, false, Rename.OVERWRITE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
}
@Test
public void testRenameDirectoryAsNonExistentDirectory() throws Exception {
testRenameDirectoryAsNonExistentDirectory(Rename.NONE);
tearDown();
testRenameDirectoryAsNonExistentDirectory(Rename.OVERWRITE);
}
private void testRenameDirectoryAsNonExistentDirectory(Rename... options) throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fc, "test/hadoop/dir");
fc.mkdir(src, FileContext.DEFAULT_PERM, true);
createFile(getTestRootPath(fc, "test/hadoop/dir/file1"));
createFile(getTestRootPath(fc, "test/hadoop/dir/subdir/file2"));
Path dst = getTestRootPath(fc, "test/new/newdir");
fc.mkdir(dst.getParent(), FileContext.DEFAULT_PERM, true);
rename(src, dst, true, false, true, options);
Assert.assertFalse("Nested file1 exists",
exists(fc, getTestRootPath(fc, "test/hadoop/dir/file1")));
Assert.assertFalse("Nested file2 exists",
exists(fc, getTestRootPath(fc, "test/hadoop/dir/subdir/file2")));
Assert.assertTrue("Renamed nested file1 exists",
exists(fc, getTestRootPath(fc, "test/new/newdir/file1")));
Assert.assertTrue("Renamed nested file2 exists",
exists(fc, getTestRootPath(fc, "test/new/newdir/subdir/file2")));
}
@Test
public void testRenameDirectoryAsEmptyDirectory() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fc, "test/hadoop/dir");
fc.mkdir(src, FileContext.DEFAULT_PERM, true);
createFile(getTestRootPath(fc, "test/hadoop/dir/file1"));
createFile(getTestRootPath(fc, "test/hadoop/dir/subdir/file2"));
Path dst = getTestRootPath(fc, "test/new/newdir");
fc.mkdir(dst, FileContext.DEFAULT_PERM, true);
// Fails without overwrite option
try {
rename(src, dst, false, true, false, Rename.NONE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
// Expected (cannot over-write non-empty destination)
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Succeeds with the overwrite option
rename(src, dst, true, false, true, Rename.OVERWRITE);
}
@Test
public void testRenameDirectoryAsNonEmptyDirectory() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fc, "test/hadoop/dir");
fc.mkdir(src, FileContext.DEFAULT_PERM, true);
createFile(getTestRootPath(fc, "test/hadoop/dir/file1"));
createFile(getTestRootPath(fc, "test/hadoop/dir/subdir/file2"));
Path dst = getTestRootPath(fc, "test/new/newdir");
fc.mkdir(dst, FileContext.DEFAULT_PERM, true);
createFile(getTestRootPath(fc, "test/new/newdir/file1"));
// Fails without overwrite option
try {
rename(src, dst, false, true, false, Rename.NONE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
// Expected (cannot over-write non-empty destination)
Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
// Fails even with the overwrite option
try {
rename(src, dst, false, true, false, Rename.OVERWRITE);
Assert.fail("Expected exception was not thrown");
} catch (IOException ex) {
// Expected (cannot over-write non-empty destination)
}
}
@Test
public void testRenameDirectoryAsFile() throws Exception {
if (!renameSupported()) return;
Path src = getTestRootPath(fc, "test/hadoop/dir");
fc.mkdir(src, FileContext.DEFAULT_PERM, true);
Path dst = getTestRootPath(fc, "test/new/newfile");
createFile(dst);
// Fails without overwrite option
try {
rename(src, dst, false, true, true, Rename.NONE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
// Expected: a directory cannot be renamed onto an existing file
}
// Directory cannot be renamed as existing file
try {
rename(src, dst, false, true, true, Rename.OVERWRITE);
Assert.fail("Expected exception was not thrown");
} catch (IOException ex) {
// Expected: OVERWRITE does not allow renaming a directory onto a file
}
}
@Test
public void testInputStreamClosedTwice() throws IOException {
//HADOOP-4760 according to Closeable#close() closing already-closed
//streams should have no effect.
Path src = getTestRootPath(fc, "test/hadoop/file");
createFile(src);
FSDataInputStream in = fc.open(src);
in.close();
in.close();
}
@Test
public void testOutputStreamClosedTwice() throws IOException {
//HADOOP-4760 according to Closeable#close() closing already-closed
//streams should have no effect.
Path src = getTestRootPath(fc, "test/hadoop/file");
FSDataOutputStream out = fc.create(src, EnumSet.of(CREATE),
Options.CreateOpts.createParent());
out.writeChar('H'); //write some data
out.close();
out.close();
}
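/*
 * Illustrative sketch (not part of the original suite): the Closeable
 * contract exercised by the two tests above is typically satisfied with a
 * guard flag, so a second close() becomes a no-op. All names here are
 * hypothetical.
 */
private static class IdempotentCloseExample implements java.io.Closeable {
private boolean closed = false;
@Override
public void close() throws IOException {
if (closed) {
return; // already closed: per Closeable#close(), closing again has no effect
}
closed = true;
// release any underlying resources here
}
}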
/** Test FileContext APIs when symlinks are not supported */
@Test
public void testUnsupportedSymlink() throws IOException {
Path file = getTestRootPath(fc, "file");
Path link = getTestRootPath(fc, "linkToFile");
if (!fc.getDefaultFileSystem().supportsSymlinks()) {
try {
fc.createSymlink(file, link, false);
Assert.fail("Created a symlink on a file system that "+
"does not support symlinks.");
} catch (IOException e) {
// Expected
}
createFile(file);
try {
fc.getLinkTarget(file);
Assert.fail("Got a link target on a file system that "+
"does not support symlinks.");
} catch (IOException e) {
// Expected
}
Assert.assertEquals(fc.getFileStatus(file), fc.getFileLinkStatus(file));
}
}
protected void createFile(Path path) throws IOException {
FSDataOutputStream out = fc.create(path, EnumSet.of(CREATE),
Options.CreateOpts.createParent());
out.write(data, 0, data.length);
out.close();
}
private void rename(Path src, Path dst, boolean renameShouldSucceed,
boolean srcExists, boolean dstExists, Rename... options)
throws IOException {
fc.rename(src, dst, options);
if (!renameShouldSucceed)
Assert.fail("rename should have thrown exception");
Assert.assertEquals("Source exists", srcExists, exists(fc, src));
Assert.assertEquals("Destination exists", dstExists, exists(fc, dst));
}
private boolean containsPath(Path path, FileStatus[] filteredPaths)
throws IOException {
for(int i = 0; i < filteredPaths.length; i ++) {
if (getTestRootPath(fc, path.toString()).equals(
filteredPaths[i].getPath()))
return true;
}
return false;
}
@Test
public void testOpen2() throws IOException {
final Path rootPath = getTestRootPath(fc, "test");
final Path path = new Path(rootPath, "zoo");
createFile(path);
final long length = fc.getFileStatus(path).getLen();
FSDataInputStream fsdis = fc.open(path, 2048);
try {
byte[] bb = new byte[(int)length];
fsdis.readFully(bb);
assertArrayEquals(data, bb);
} finally {
fsdis.close();
}
}
@Test
public void testSetVerifyChecksum() throws IOException {
final Path rootPath = getTestRootPath(fc, "test");
final Path path = new Path(rootPath, "zoo");
FSDataOutputStream out = fc.create(path, EnumSet.of(CREATE),
Options.CreateOpts.createParent());
try {
// instruct FS to verify checksum through the FileContext:
fc.setVerifyChecksum(true, path);
out.write(data, 0, data.length);
} finally {
out.close();
}
// NB: underlying FS may be different (this is an abstract test),
// so we cannot assert .zoo.crc existence.
// Instead, we check that the file is read correctly:
FileStatus fileStatus = fc.getFileStatus(path);
final long len = fileStatus.getLen();
assertEquals(data.length, len);
byte[] bb = new byte[(int)len];
FSDataInputStream fsdis = fc.open(path);
try {
fsdis.readFully(bb);
} finally {
fsdis.close();
}
assertArrayEquals(data, bb);
}
@Test
public void testListCorruptFileBlocks() throws IOException {
final Path rootPath = getTestRootPath(fc, "test");
final Path path = new Path(rootPath, "zoo");
createFile(path);
try {
final RemoteIterator<Path> remoteIterator = fc
.listCorruptFileBlocks(path);
if (listCorruptedBlocksSupported()) {
assertTrue(remoteIterator != null);
Path p;
while (remoteIterator.hasNext()) {
p = remoteIterator.next();
System.out.println("corrupted block: " + p);
}
try {
remoteIterator.next();
fail();
} catch (NoSuchElementException nsee) {
// okay
}
} else {
fail();
}
} catch (UnsupportedOperationException uoe) {
if (listCorruptedBlocksSupported()) {
fail(uoe.toString());
} else {
// okay
}
}
}
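// Whether the file system under test can list corrupt file blocks;
// typically false for local file systems and true for HDFS-backed suites.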
protected abstract boolean listCorruptedBlocksSupported();
@Test
public void testDeleteOnExitUnexisting() throws IOException {
final Path rootPath = getTestRootPath(fc, "test");
final Path path = new Path(rootPath, "zoo");
boolean registered = fc.deleteOnExit(path);
// because "zoo" does not exist:
assertTrue(!registered);
}
@Test
public void testFileContextStatistics() throws IOException {
FileContext.clearStatistics();
final Path rootPath = getTestRootPath(fc, "test");
final Path path = new Path(rootPath, "zoo");
createFile(path);
byte[] bb = new byte[data.length];
FSDataInputStream fsdis = fc.open(path);
try {
fsdis.readFully(bb);
} finally {
fsdis.close();
}
assertArrayEquals(data, bb);
FileContext.printStatistics();
}
/*
 * Test method
 * org.apache.hadoop.fs.FileContext.getFileContext(AbstractFileSystem)
 */
@Test
public void testGetFileContext1() throws IOException {
final Path rootPath = getTestRootPath(fc, "test");
AbstractFileSystem asf = fc.getDefaultFileSystem();
// create FileContext using the protected #getFileContext(1) method:
FileContext fc2 = FileContext.getFileContext(asf);
// Now just check that this context can do something reasonable:
final Path path = new Path(rootPath, "zoo");
FSDataOutputStream out = fc2.create(path, EnumSet.of(CREATE),
Options.CreateOpts.createParent());
out.close();
Path pathResolved = fc2.resolvePath(path);
assertEquals(pathResolved.toUri().getPath(), path.toUri().getPath());
}
private Path getTestRootPath(FileContext fc, String pathString) {
return fileContextTestHelper.getTestRootPath(fc, pathString);
}
}
| 46,192 | 33.344238 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestBlockLocation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import org.junit.Test;
public class TestBlockLocation {
private static final String[] EMPTY_STR_ARRAY = new String[0];
private static void checkBlockLocation(final BlockLocation loc)
throws Exception {
checkBlockLocation(loc, 0, 0, false);
}
private static void checkBlockLocation(final BlockLocation loc,
final long offset, final long length, final boolean corrupt)
throws Exception {
checkBlockLocation(loc, EMPTY_STR_ARRAY, EMPTY_STR_ARRAY, EMPTY_STR_ARRAY,
EMPTY_STR_ARRAY, offset, length, corrupt);
}
private static void checkBlockLocation(final BlockLocation loc,
String[] names, String[] hosts, String[] cachedHosts,
String[] topologyPaths, final long offset, final long length,
final boolean corrupt) throws Exception {
assertNotNull(loc.getHosts());
assertNotNull(loc.getCachedHosts());
assertNotNull(loc.getNames());
assertNotNull(loc.getTopologyPaths());
assertArrayEquals(hosts, loc.getHosts());
assertArrayEquals(cachedHosts, loc.getCachedHosts());
assertArrayEquals(names, loc.getNames());
assertArrayEquals(topologyPaths, loc.getTopologyPaths());
assertEquals(offset, loc.getOffset());
assertEquals(length, loc.getLength());
assertEquals(corrupt, loc.isCorrupt());
}
/**
* Call all the constructors and verify the delegation is working properly
*/
@Test(timeout = 5000)
public void testBlockLocationConstructors() throws Exception {
//
BlockLocation loc;
loc = new BlockLocation();
checkBlockLocation(loc);
loc = new BlockLocation(null, null, 1, 2);
checkBlockLocation(loc, 1, 2, false);
loc = new BlockLocation(null, null, null, 1, 2);
checkBlockLocation(loc, 1, 2, false);
loc = new BlockLocation(null, null, null, 1, 2, true);
checkBlockLocation(loc, 1, 2, true);
loc = new BlockLocation(null, null, null, null, 1, 2, true);
checkBlockLocation(loc, 1, 2, true);
}
/**
* Call each of the setters and verify
*/
@Test(timeout = 5000)
public void testBlockLocationSetters() throws Exception {
BlockLocation loc;
loc = new BlockLocation();
// Test that null sets the empty array
loc.setHosts(null);
loc.setCachedHosts(null);
loc.setNames(null);
loc.setTopologyPaths(null);
checkBlockLocation(loc);
// Test that not-null gets set properly
String[] names = new String[] { "name" };
String[] hosts = new String[] { "host" };
String[] cachedHosts = new String[] { "cachedHost" };
String[] topologyPaths = new String[] { "path" };
loc.setNames(names);
loc.setHosts(hosts);
loc.setCachedHosts(cachedHosts);
loc.setTopologyPaths(topologyPaths);
loc.setOffset(1);
loc.setLength(2);
loc.setCorrupt(true);
checkBlockLocation(loc, names, hosts, cachedHosts, topologyPaths, 1, 2,
true);
}
}
| 3,859 | 34.412844 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocal_S3FileContextURI.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import org.apache.hadoop.conf.Configuration;
import org.junit.Before;
public class TestLocal_S3FileContextURI extends FileContextURIBase {
@Override
@Before
public void setUp() throws Exception {
Configuration S3Conf = new Configuration();
Configuration localConf = new Configuration();
S3Conf.set(FS_DEFAULT_NAME_KEY, S3Conf.get("test.fs.s3.name"));
fc1 = FileContext.getFileContext(S3Conf);
fc2 = FileContext.getFileContext(localConf);
}
}
| 1,407 | 35.102564 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertNotSame;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.junit.Test;
import java.security.PrivilegedExceptionAction;
import java.util.concurrent.Semaphore;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
public class TestFileSystemCaching {
@Test
public void testCacheEnabled() throws Exception {
Configuration conf = new Configuration();
conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
FileSystem fs1 = FileSystem.get(new URI("cachedfile://a"), conf);
FileSystem fs2 = FileSystem.get(new URI("cachedfile://a"), conf);
assertSame(fs1, fs2);
}
static class DefaultFs extends LocalFileSystem {
URI uri;
@Override
public void initialize(URI uri, Configuration conf) {
this.uri = uri;
}
@Override
public URI getUri() {
return uri;
}
}
@Test
public void testDefaultFsUris() throws Exception {
final Configuration conf = new Configuration();
conf.set("fs.defaultfs.impl", DefaultFs.class.getName());
final URI defaultUri = URI.create("defaultfs://host");
FileSystem.setDefaultUri(conf, defaultUri);
FileSystem fs = null;
// sanity check default fs
final FileSystem defaultFs = FileSystem.get(conf);
assertEquals(defaultUri, defaultFs.getUri());
// has scheme, no auth
fs = FileSystem.get(URI.create("defaultfs:/"), conf);
assertSame(defaultFs, fs);
fs = FileSystem.get(URI.create("defaultfs:///"), conf);
assertSame(defaultFs, fs);
// has scheme, same auth
fs = FileSystem.get(URI.create("defaultfs://host"), conf);
assertSame(defaultFs, fs);
// has scheme, different auth
fs = FileSystem.get(URI.create("defaultfs://host2"), conf);
assertNotSame(defaultFs, fs);
// no scheme, no auth
fs = FileSystem.get(URI.create("/"), conf);
assertSame(defaultFs, fs);
// no scheme, same auth
try {
fs = FileSystem.get(URI.create("//host"), conf);
fail("got fs with auth but no scheme");
} catch (Exception e) {
assertEquals("No FileSystem for scheme: null", e.getMessage());
}
// no scheme, different auth
try {
fs = FileSystem.get(URI.create("//host2"), conf);
fail("got fs with auth but no scheme");
} catch (Exception e) {
assertEquals("No FileSystem for scheme: null", e.getMessage());
}
}
public static class InitializeForeverFileSystem extends LocalFileSystem {
final static Semaphore sem = new Semaphore(0);
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
// notify that InitializeForeverFileSystem started initialization
sem.release();
try {
while (true) {
Thread.sleep(1000);
}
} catch (InterruptedException e) {
return;
}
}
}
@Test
public void testCacheEnabledWithInitializeForeverFS() throws Exception {
final Configuration conf = new Configuration();
Thread t = new Thread() {
@Override
public void run() {
conf.set("fs.localfs1.impl", "org.apache.hadoop.fs." +
"TestFileSystemCaching$InitializeForeverFileSystem");
try {
FileSystem.get(new URI("localfs1://a"), conf);
} catch (IOException e) {
e.printStackTrace();
} catch (URISyntaxException e) {
e.printStackTrace();
}
}
};
t.start();
// wait for InitializeForeverFileSystem to start initialization
InitializeForeverFileSystem.sem.acquire();
conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
FileSystem.get(new URI("cachedfile://a"), conf);
t.interrupt();
t.join();
}
@Test
public void testCacheDisabled() throws Exception {
Configuration conf = new Configuration();
conf.set("fs.uncachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
conf.setBoolean("fs.uncachedfile.impl.disable.cache", true);
FileSystem fs1 = FileSystem.get(new URI("uncachedfile://a"), conf);
FileSystem fs2 = FileSystem.get(new URI("uncachedfile://a"), conf);
assertNotSame(fs1, fs2);
}
@SuppressWarnings("unchecked")
@Test
public <T extends TokenIdentifier> void testCacheForUgi() throws Exception {
final Configuration conf = new Configuration();
conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
UserGroupInformation ugiA = UserGroupInformation.createRemoteUser("foo");
UserGroupInformation ugiB = UserGroupInformation.createRemoteUser("bar");
FileSystem fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return FileSystem.get(new URI("cachedfile://a"), conf);
}
});
FileSystem fsA1 = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return FileSystem.get(new URI("cachedfile://a"), conf);
}
});
//Since the UGIs are the same, we should have the same filesystem for both
assertSame(fsA, fsA1);
FileSystem fsB = ugiB.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return FileSystem.get(new URI("cachedfile://a"), conf);
}
});
//Since the UGIs are different, we should end up with different filesystems
//corresponding to the two UGIs
assertNotSame(fsA, fsB);
Token<T> t1 = mock(Token.class);
UserGroupInformation ugiA2 = UserGroupInformation.createRemoteUser("foo");
fsA = ugiA2.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return FileSystem.get(new URI("cachedfile://a"), conf);
}
});
// Although the users in the UGI are same, they have different subjects
// and so are different.
assertNotSame(fsA, fsA1);
ugiA.addToken(t1);
fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return FileSystem.get(new URI("cachedfile://a"), conf);
}
});
// Make sure that different UGIs with the same subject lead to the same
// file system.
assertSame(fsA, fsA1);
}
@Test
public void testUserFS() throws Exception {
final Configuration conf = new Configuration();
conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
FileSystem fsU1 = FileSystem.get(new URI("cachedfile://a"), conf, "bar");
FileSystem fsU2 = FileSystem.get(new URI("cachedfile://a"), conf, "foo");
assertNotSame(fsU1, fsU2);
}
@Test
public void testFsUniqueness() throws Exception {
final Configuration conf = new Configuration();
conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
// multiple invocations of FileSystem.get return the same object.
FileSystem fs1 = FileSystem.get(conf);
FileSystem fs2 = FileSystem.get(conf);
assertSame(fs1, fs2);
// multiple invocations of FileSystem.newInstance return different objects
fs1 = FileSystem.newInstance(new URI("cachedfile://a"), conf, "bar");
fs2 = FileSystem.newInstance(new URI("cachedfile://a"), conf, "bar");
assertTrue(fs1 != fs2 && !fs1.equals(fs2));
fs1.close();
fs2.close();
}
@Test
public void testCloseAllForUGI() throws Exception {
final Configuration conf = new Configuration();
conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
UserGroupInformation ugiA = UserGroupInformation.createRemoteUser("foo");
FileSystem fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return FileSystem.get(new URI("cachedfile://a"), conf);
}
});
//Now we should get the cached filesystem
FileSystem fsA1 = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return FileSystem.get(new URI("cachedfile://a"), conf);
}
});
assertSame(fsA, fsA1);
FileSystem.closeAllForUGI(ugiA);
//Now we should get a different (newly created) filesystem
fsA1 = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return FileSystem.get(new URI("cachedfile://a"), conf);
}
});
assertNotSame(fsA, fsA1);
}
@Test
public void testDelete() throws IOException {
FileSystem mockFs = mock(FileSystem.class);
FileSystem fs = new FilterFileSystem(mockFs);
Path path = new Path("/a");
fs.delete(path, false);
verify(mockFs).delete(eq(path), eq(false));
reset(mockFs);
fs.delete(path, true);
verify(mockFs).delete(eq(path), eq(true));
}
@Test
public void testDeleteOnExit() throws IOException {
FileSystem mockFs = mock(FileSystem.class);
FileSystem fs = new FilterFileSystem(mockFs);
Path path = new Path("/a");
// delete on close if path does exist
when(mockFs.getFileStatus(eq(path))).thenReturn(new FileStatus());
assertTrue(fs.deleteOnExit(path));
verify(mockFs).getFileStatus(eq(path));
reset(mockFs);
when(mockFs.getFileStatus(eq(path))).thenReturn(new FileStatus());
fs.close();
verify(mockFs).getFileStatus(eq(path));
verify(mockFs).delete(eq(path), eq(true));
}
@Test
public void testDeleteOnExitFNF() throws IOException {
FileSystem mockFs = mock(FileSystem.class);
FileSystem fs = new FilterFileSystem(mockFs);
Path path = new Path("/a");
// don't delete on close if path doesn't exist
assertFalse(fs.deleteOnExit(path));
verify(mockFs).getFileStatus(eq(path));
reset(mockFs);
fs.close();
verify(mockFs, never()).getFileStatus(eq(path));
verify(mockFs, never()).delete(any(Path.class), anyBoolean());
}
@Test
public void testDeleteOnExitRemoved() throws IOException {
FileSystem mockFs = mock(FileSystem.class);
FileSystem fs = new FilterFileSystem(mockFs);
Path path = new Path("/a");
// don't delete on close if path existed, but later removed
when(mockFs.getFileStatus(eq(path))).thenReturn(new FileStatus());
assertTrue(fs.deleteOnExit(path));
verify(mockFs).getFileStatus(eq(path));
reset(mockFs);
fs.close();
verify(mockFs).getFileStatus(eq(path));
verify(mockFs, never()).delete(any(Path.class), anyBoolean());
}
@Test
public void testCancelDeleteOnExit() throws IOException {
FileSystem mockFs = mock(FileSystem.class);
FileSystem fs = new FilterFileSystem(mockFs);
Path path = new Path("/a");
// don't delete on close if path existed, but later cancelled
when(mockFs.getFileStatus(eq(path))).thenReturn(new FileStatus());
assertTrue(fs.deleteOnExit(path));
verify(mockFs).getFileStatus(eq(path));
assertTrue(fs.cancelDeleteOnExit(path));
assertFalse(fs.cancelDeleteOnExit(path)); // false because not registered
reset(mockFs);
fs.close();
verify(mockFs, never()).getFileStatus(any(Path.class));
verify(mockFs, never()).delete(any(Path.class), anyBoolean());
}
}
| 12,602 | 33.911357 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import junit.framework.TestCase;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
/** This test makes sure that "DU" does not get to run on each call to getUsed */
public class TestDU extends TestCase {
final static private File DU_DIR = new File(
System.getProperty("test.build.data","/tmp"), "dutmp");
@Override
public void setUp() {
FileUtil.fullyDelete(DU_DIR);
assertTrue(DU_DIR.mkdirs());
}
@Override
public void tearDown() throws IOException {
FileUtil.fullyDelete(DU_DIR);
}
private void createFile(File newFile, int size) throws IOException {
// write random data so that filesystems with compression enabled (e.g., ZFS)
// can't compress the file
Random random = new Random();
byte[] data = new byte[size];
random.nextBytes(data);
newFile.createNewFile();
RandomAccessFile file = new RandomAccessFile(newFile, "rws");
file.write(data);
file.getFD().sync();
file.close();
}
/**
* Verify that du returns expected used space for a file.
* We assume here that if a file system creates a file whose size
* is a multiple of the file system's block size,
* then the used size for the file will be exactly that size.
* This is true for most file systems.
*
* @throws IOException
* @throws InterruptedException
*/
public void testDU() throws IOException, InterruptedException {
final int writtenSize = 32*1024; // writing 32K
// Allow an extra 4K of on-disk slack for local file systems
// that may store additional file metadata (e.g., extended attributes).
final int slack = 4*1024;
File file = new File(DU_DIR, "data");
createFile(file, writtenSize);
Thread.sleep(5000); // let the metadata updater catch up
DU du = new DU(file, 10000);
du.start();
long duSize = du.getUsed();
du.shutdown();
assertTrue("Invalid on-disk size",
duSize >= writtenSize &&
duSize <= (writtenSize + slack));
//test with 0 interval, will not launch thread
du = new DU(file, 0);
du.start();
duSize = du.getUsed();
du.shutdown();
assertTrue("Invalid on-disk size",
duSize >= writtenSize &&
duSize <= (writtenSize + slack));
//test without launching thread
du = new DU(file, 10000);
duSize = du.getUsed();
assertTrue("Invalid on-disk size",
duSize >= writtenSize &&
duSize <= (writtenSize + slack));
}
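/*
 * Sketch (not part of the original test): the bound asserted above is
 * writtenSize <= duSize <= writtenSize + slack; this helper makes that
 * invariant explicit. The helper name is hypothetical.
 */
private static void assertOnDiskSizeWithinSlack(long duSize,
long writtenSize, long slack) {
assertTrue("Invalid on-disk size",
duSize >= writtenSize && duSize <= (writtenSize + slack));
}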
public void testDUGetUsedWillNotReturnNegative() throws IOException {
File file = new File(DU_DIR, "data");
assertTrue(file.createNewFile());
Configuration conf = new Configuration();
conf.setLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY, 10000L);
DU du = new DU(file, conf);
du.decDfsUsed(Long.MAX_VALUE);
long duSize = du.getUsed();
assertTrue(String.valueOf(duSize), duSize >= 0L);
}
public void testDUSetInitialValue() throws IOException {
File file = new File(DU_DIR, "dataX");
createFile(file, 8192);
DU du = new DU(file, 3000, 1024);
du.start();
assertTrue("Initial usage setting not honored", du.getUsed() == 1024);
// wait until the first du runs.
try {
Thread.sleep(5000);
} catch (InterruptedException ie) {}
assertTrue("Usage didn't get updated", du.getUsed() == 8192);
}
}
| 4,340 | 31.155556 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.FileNotFoundException;
import java.util.EnumSet;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Options.CreateOpts.BlockSize;
import org.apache.hadoop.io.IOUtils;
import org.junit.Assert;
/**
* Helper class for unit tests.
*/
public final class FileContextTestHelper {
private static final int DEFAULT_BLOCK_SIZE = 1024;
private static final int DEFAULT_NUM_BLOCKS = 2;
private final String testRootDir;
private String absTestRootDir = null;
/**
* Create a context with a test root relative to
* <working dir>/target/test/data
*/
public FileContextTestHelper() {
this(System.getProperty("test.build.data", "target/test/data") + "/" +
RandomStringUtils.randomAlphanumeric(10));
}
/**
* Create a context with the given test root
*/
public FileContextTestHelper(String testRootDir) {
this.testRootDir = testRootDir;
}
public static int getDefaultBlockSize() {
return DEFAULT_BLOCK_SIZE;
}
public static byte[] getFileData(int numOfBlocks, long blockSize) {
byte[] data = new byte[(int) (numOfBlocks * blockSize)];
for (int i = 0; i < data.length; i++) {
data[i] = (byte) (i % 10);
}
return data;
}
public Path getTestRootPath(FileContext fc) {
return fc.makeQualified(new Path(testRootDir));
}
public Path getTestRootPath(FileContext fc, String pathString) {
return fc.makeQualified(new Path(testRootDir, pathString));
}
// the getAbsoluteXxx methods are needed because the root test dir
// can be invalidated by changing the working dir.
public String getAbsoluteTestRootDir(FileContext fc) {
if (absTestRootDir == null) {
if (new Path(testRootDir).isAbsolute()) {
absTestRootDir = testRootDir;
} else {
absTestRootDir = fc.getWorkingDirectory().toString() + "/"
+ testRootDir;
}
}
return absTestRootDir;
}
public Path getAbsoluteTestRootPath(FileContext fc) {
return fc.makeQualified(new Path(getAbsoluteTestRootDir(fc)));
}
public Path getDefaultWorkingDirectory(FileContext fc) {
return getTestRootPath(fc, "/user/" + System.getProperty("user.name"))
.makeQualified(fc.getDefaultFileSystem().getUri(),
fc.getWorkingDirectory());
}
/*
* Create files with numBlocks blocks each with block size blockSize.
*/
public static long createFile(FileContext fc, Path path, int numBlocks,
CreateOpts... options) throws IOException {
BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
: DEFAULT_BLOCK_SIZE;
FSDataOutputStream out =
fc.create(path, EnumSet.of(CreateFlag.CREATE), options);
byte[] data = getFileData(numBlocks, blockSize);
out.write(data, 0, data.length);
out.close();
return data.length;
}
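/*
 * Usage sketch (illustrative; the path below is an assumption, not taken
 * from the original helper): create a 2-block file with a 4 KB block size
 * via the options-based overload above and check the returned byte count.
 */
public static void exampleCreateFileUsage(FileContext fc)
throws IOException {
Path path = new Path("/tmp/exampleCreateFileUsage"); // hypothetical path
long written = createFile(fc, path, 2, CreateOpts.blockSize(4096),
CreateOpts.createParent());
Assert.assertEquals(2 * 4096L, written); // 2 blocks of 4096 bytes each
}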
public static long createFile(FileContext fc, Path path, int numBlocks,
int blockSize) throws IOException {
return createFile(fc, path, numBlocks, CreateOpts.blockSize(blockSize),
CreateOpts.createParent());
}
public static long createFile(FileContext fc, Path path) throws IOException {
return createFile(fc, path, DEFAULT_NUM_BLOCKS, CreateOpts.createParent());
}
public long createFile(FileContext fc, String name) throws IOException {
Path path = getTestRootPath(fc, name);
return createFile(fc, path);
}
public long createFileNonRecursive(FileContext fc, String name)
throws IOException {
Path path = getTestRootPath(fc, name);
return createFileNonRecursive(fc, path);
}
public static long createFileNonRecursive(FileContext fc, Path path)
throws IOException {
return createFile(fc, path, DEFAULT_NUM_BLOCKS, CreateOpts.donotCreateParent());
}
public static void appendToFile(FileContext fc, Path path, int numBlocks,
CreateOpts... options) throws IOException {
BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
: DEFAULT_BLOCK_SIZE;
FSDataOutputStream out;
out = fc.create(path, EnumSet.of(CreateFlag.APPEND));
byte[] data = getFileData(numBlocks, blockSize);
out.write(data, 0, data.length);
out.close();
}
public static boolean exists(FileContext fc, Path p) throws IOException {
return fc.util().exists(p);
}
public static boolean isFile(FileContext fc, Path p) throws IOException {
try {
return fc.getFileStatus(p).isFile();
} catch (FileNotFoundException e) {
return false;
}
}
public static boolean isDir(FileContext fc, Path p) throws IOException {
try {
return fc.getFileStatus(p).isDirectory();
} catch (FileNotFoundException e) {
return false;
}
}
public static boolean isSymlink(FileContext fc, Path p) throws IOException {
try {
return fc.getFileLinkStatus(p).isSymlink();
} catch (FileNotFoundException e) {
return false;
}
}
public static void writeFile(FileContext fc, Path path, byte b[])
throws IOException {
FSDataOutputStream out =
fc.create(path,EnumSet.of(CreateFlag.CREATE), CreateOpts.createParent());
out.write(b);
out.close();
}
public static byte[] readFile(FileContext fc, Path path, int len)
throws IOException {
DataInputStream dis = fc.open(path);
byte[] buffer = new byte[len];
IOUtils.readFully(dis, buffer, 0, len);
dis.close();
return buffer;
}
public FileStatus containsPath(FileContext fc, Path path,
FileStatus[] dirList) {
return containsPath(getTestRootPath(fc, path.toString()), dirList);
}
public static FileStatus containsPath(Path path, FileStatus[] dirList) {
for(int i = 0; i < dirList.length; i ++) {
if (path.equals(dirList[i].getPath()))
return dirList[i];
}
return null;
}
public FileStatus containsPath(FileContext fc, String path,
FileStatus[] dirList) {
return containsPath(fc, new Path(path), dirList);
}
public static enum fileType {isDir, isFile, isSymlink};
public static void checkFileStatus(FileContext aFc, String path,
fileType expectedType) throws IOException {
FileStatus s = aFc.getFileStatus(new Path(path));
Assert.assertNotNull(s);
if (expectedType == fileType.isDir) {
Assert.assertTrue(s.isDirectory());
} else if (expectedType == fileType.isFile) {
Assert.assertTrue(s.isFile());
} else if (expectedType == fileType.isSymlink) {
Assert.assertTrue(s.isSymlink());
}
Assert.assertEquals(aFc.makeQualified(new Path(path)), s.getPath());
}
public static void checkFileLinkStatus(FileContext aFc, String path,
fileType expectedType) throws IOException {
FileStatus s = aFc.getFileLinkStatus(new Path(path));
Assert.assertNotNull(s);
if (expectedType == fileType.isDir) {
Assert.assertTrue(s.isDirectory());
} else if (expectedType == fileType.isFile) {
Assert.assertTrue(s.isFile());
} else if (expectedType == fileType.isSymlink) {
Assert.assertTrue(s.isSymlink());
}
Assert.assertEquals(aFc.makeQualified(new Path(path)), s.getPath());
}
}
| 8,246 | 32.120482 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShell.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import junit.framework.AssertionFailedError;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.SetSpanReceiver;
import org.apache.hadoop.tracing.SpanReceiverHost;
import org.apache.hadoop.util.ToolRunner;
import org.apache.htrace.SamplerBuilder;
import org.apache.htrace.impl.AlwaysSampler;
import org.junit.Assert;
import org.junit.Test;
public class TestFsShell {
@Test
public void testConfWithInvalidFile() throws Throwable {
String[] args = new String[1];
args[0] = "--conf=invalidFile";
Throwable th = null;
try {
FsShell.main(args);
} catch (Exception e) {
th = e;
}
if (!(th instanceof RuntimeException)) {
throw new AssertionFailedError("Expected Runtime exception, got: " + th)
.initCause(th);
}
}
@Test
public void testTracing() throws Throwable {
Configuration conf = new Configuration();
String prefix = FsShell.SEHLL_HTRACE_PREFIX;
conf.set(prefix + SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX,
SetSpanReceiver.class.getName());
conf.set(prefix + SamplerBuilder.SAMPLER_CONF_KEY,
AlwaysSampler.class.getName());
conf.setQuietMode(false);
FsShell shell = new FsShell(conf);
int res;
try {
res = ToolRunner.run(shell, new String[]{"-help", "ls", "cat"});
} finally {
shell.close();
}
SetSpanReceiver.assertSpanNamesFound(new String[]{"help"});
Assert.assertEquals("-help ls cat",
SetSpanReceiver.getMap()
.get("help").get(0).getKVAnnotations().get("args"));
}
}
| 2,411 | 32.971831 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobExpander.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.util.List;
import junit.framework.TestCase;
public class TestGlobExpander extends TestCase {
public void testExpansionIsIdentical() throws IOException {
checkExpansionIsIdentical("");
checkExpansionIsIdentical("/}");
checkExpansionIsIdentical("/}{a,b}");
checkExpansionIsIdentical("{/");
checkExpansionIsIdentical("{a}");
checkExpansionIsIdentical("{a,b}/{b,c}");
checkExpansionIsIdentical("p\\{a/b,c/d\\}s");
checkExpansionIsIdentical("p{a\\/b,c\\/d}s");
}
public void testExpansion() throws IOException {
checkExpansion("{a/b}", "a/b");
checkExpansion("/}{a/b}", "/}a/b");
checkExpansion("p{a/b,c/d}s", "pa/bs", "pc/ds");
checkExpansion("{a/b,c/d,{e,f}}", "a/b", "c/d", "{e,f}");
checkExpansion("{a/b,c/d}{e,f}", "a/b{e,f}", "c/d{e,f}");
checkExpansion("{a,b}/{b,{c/d,e/f}}", "{a,b}/b", "{a,b}/c/d", "{a,b}/e/f");
checkExpansion("{a,b}/{c/\\d}", "{a,b}/c/d");
}
private void checkExpansionIsIdentical(String filePattern) throws IOException {
checkExpansion(filePattern, filePattern);
}
private void checkExpansion(String filePattern, String... expectedExpansions)
throws IOException {
List<String> actualExpansions = GlobExpander.expand(filePattern);
assertEquals("Different number of expansions", expectedExpansions.length,
actualExpansions.size());
for (int i = 0; i < expectedExpansions.length; i++) {
assertEquals("Expansion of " + filePattern, expectedExpansions[i],
actualExpansions.get(i));
}
}
}
| 2,416 | 37.365079 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestCommandFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.fs.shell.CommandFormat;
import org.apache.hadoop.fs.shell.CommandFormat.NotEnoughArgumentsException;
import org.apache.hadoop.fs.shell.CommandFormat.TooManyArgumentsException;
import org.apache.hadoop.fs.shell.CommandFormat.UnknownOptionException;
import org.junit.Before;
import org.junit.Test;
/**
* This class tests the command line parsing
*/
public class TestCommandFormat {
private static List<String> args;
private static List<String> expectedArgs;
private static Set<String> expectedOpts;
@Before
public void setUp() {
args = new ArrayList<String>();
expectedOpts = new HashSet<String>();
expectedArgs = new ArrayList<String>();
}
@Test
public void testNoArgs() {
checkArgLimits(null, 0, 0);
checkArgLimits(null, 0, 1);
checkArgLimits(NotEnoughArgumentsException.class, 1, 1);
checkArgLimits(NotEnoughArgumentsException.class, 1, 2);
}
@Test
public void testOneArg() {
args = listOf("a");
expectedArgs = listOf("a");
checkArgLimits(TooManyArgumentsException.class, 0, 0);
checkArgLimits(null, 0, 1);
checkArgLimits(null, 1, 1);
checkArgLimits(null, 1, 2);
checkArgLimits(NotEnoughArgumentsException.class, 2, 3);
}
@Test
public void testTwoArgs() {
args = listOf("a", "b");
expectedArgs = listOf("a", "b");
checkArgLimits(TooManyArgumentsException.class, 0, 0);
checkArgLimits(TooManyArgumentsException.class, 1, 1);
checkArgLimits(null, 1, 2);
checkArgLimits(null, 2, 2);
checkArgLimits(null, 2, 3);
checkArgLimits(NotEnoughArgumentsException.class, 3, 3);
}
@Test
public void testOneOpt() {
args = listOf("-a");
expectedOpts = setOf("a");
checkArgLimits(UnknownOptionException.class, 0, 0);
checkArgLimits(null, 0, 0, "a", "b");
checkArgLimits(NotEnoughArgumentsException.class, 1, 1, "a", "b");
}
@Test
public void testTwoOpts() {
args = listOf("-a", "-b");
expectedOpts = setOf("a", "b");
checkArgLimits(UnknownOptionException.class, 0, 0);
checkArgLimits(null, 0, 0, "a", "b");
checkArgLimits(null, 0, 1, "a", "b");
checkArgLimits(NotEnoughArgumentsException.class, 1, 1, "a", "b");
}
@Test
public void testOptArg() {
args = listOf("-a", "b");
expectedOpts = setOf("a");
expectedArgs = listOf("b");
checkArgLimits(UnknownOptionException.class, 0, 0);
checkArgLimits(TooManyArgumentsException.class, 0, 0, "a", "b");
checkArgLimits(null, 0, 1, "a", "b");
checkArgLimits(null, 1, 1, "a", "b");
checkArgLimits(null, 1, 2, "a", "b");
checkArgLimits(NotEnoughArgumentsException.class, 2, 2, "a", "b");
}
@Test
public void testArgOpt() {
args = listOf("b", "-a");
expectedArgs = listOf("b", "-a");
checkArgLimits(TooManyArgumentsException.class, 0, 0, "a", "b");
checkArgLimits(null, 1, 2, "a", "b");
checkArgLimits(null, 2, 2, "a", "b");
checkArgLimits(NotEnoughArgumentsException.class, 3, 4, "a", "b");
}
@Test
public void testOptStopOptArg() {
args = listOf("-a", "--", "-b", "c");
expectedOpts = setOf("a");
expectedArgs = listOf("-b", "c");
checkArgLimits(UnknownOptionException.class, 0, 0);
checkArgLimits(TooManyArgumentsException.class, 0, 1, "a", "b");
checkArgLimits(null, 2, 2, "a", "b");
checkArgLimits(NotEnoughArgumentsException.class, 3, 4, "a", "b");
}
@Test
public void testOptDashArg() {
args = listOf("-b", "-", "-c");
expectedOpts = setOf("b");
expectedArgs = listOf("-", "-c");
checkArgLimits(UnknownOptionException.class, 0, 0);
checkArgLimits(TooManyArgumentsException.class, 0, 0, "b", "c");
checkArgLimits(TooManyArgumentsException.class, 1, 1, "b", "c");
checkArgLimits(null, 2, 2, "b", "c");
checkArgLimits(NotEnoughArgumentsException.class, 3, 4, "b", "c");
}
@Test
public void testOldArgsWithIndex() {
String[] arrayArgs = new String[]{"ignore", "-a", "b", "-c"};
{
CommandFormat cf = new CommandFormat(0, 9, "a", "c");
List<String> parsedArgs = cf.parse(arrayArgs, 0);
assertEquals(setOf(), cf.getOpts());
assertEquals(listOf("ignore", "-a", "b", "-c"), parsedArgs);
}
{
CommandFormat cf = new CommandFormat(0, 9, "a", "c");
List<String> parsedArgs = cf.parse(arrayArgs, 1);
assertEquals(setOf("a"), cf.getOpts());
assertEquals(listOf("b", "-c"), parsedArgs);
}
{
CommandFormat cf = new CommandFormat(0, 9, "a", "c");
List<String> parsedArgs = cf.parse(arrayArgs, 2);
assertEquals(setOf(), cf.getOpts());
assertEquals(listOf("b", "-c"), parsedArgs);
}
}
private static <T> CommandFormat checkArgLimits(
Class<? extends IllegalArgumentException> expectedErr,
int min, int max, String ... opts)
{
CommandFormat cf = new CommandFormat(min, max, opts);
List<String> parsedArgs = new ArrayList<String>(args);
Class<?> cfError = null;
try {
cf.parse(parsedArgs);
} catch (IllegalArgumentException e) {
System.out.println(e.getMessage());
cfError = e.getClass();
}
assertEquals(expectedErr, cfError);
if (expectedErr == null) {
assertEquals(expectedArgs, parsedArgs);
assertEquals(expectedOpts, cf.getOpts());
}
return cf;
}
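/*
 * Illustrative sketch (not an original test case): parsing "-a b" against
 * a format accepting options "a" and "c" records the option and keeps "b"
 * as a positional argument. The method name is hypothetical.
 */
private static void exampleParseUsage() {
CommandFormat cf = new CommandFormat(0, 2, "a", "c");
List<String> parsed = cf.parse(new String[]{"-a", "b"}, 0);
assertEquals(setOf("a"), cf.getOpts());
assertEquals(listOf("b"), parsed);
}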
// Don't use generics to avoid warning:
// unchecked generic array creation of type T[] for varargs parameter
private static List<String> listOf(String ... objects) {
return Arrays.asList(objects);
}
private static Set<String> setOf(String ... objects) {
return new HashSet<String>(listOf(objects));
}
}
| 6,684 | 30.682464 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFSFileContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.util.Shell;
import org.junit.BeforeClass;
import java.io.IOException;
import static org.junit.Assume.assumeTrue;
public class TestSymlinkLocalFSFileContext extends TestSymlinkLocalFS {
@BeforeClass
public static void testSetup() throws Exception {
FileContext context = FileContext.getLocalFSFileContext();
wrapper = new FileContextTestWrapper(context);
}
@Override
public void testRenameFileWithDestParentSymlink() throws IOException {
assumeTrue(!Shell.WINDOWS);
super.testRenameFileWithDestParentSymlink();
}
}
| 1,410 | 33.414634 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemTokens.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.*;
import static org.mockito.Matchers.*;
import static org.mockito.Mockito.*;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystemTestHelper.MockFileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
public class TestFileSystemTokens {
private static String renewer = "renewer!";
@Test
public void testFsWithNoToken() throws Exception {
MockFileSystem fs = createFileSystemForServiceName(null);
Credentials credentials = new Credentials();
fs.addDelegationTokens(renewer, credentials);
verifyTokenFetch(fs, false);
assertEquals(0, credentials.numberOfTokens());
}
@Test
public void testFsWithToken() throws Exception {
Text service = new Text("singleTokenFs");
MockFileSystem fs = createFileSystemForServiceName(service);
Credentials credentials = new Credentials();
fs.addDelegationTokens(renewer, credentials);
verifyTokenFetch(fs, true);
assertEquals(1, credentials.numberOfTokens());
assertNotNull(credentials.getToken(service));
}
@Test
public void testFsWithTokenExists() throws Exception {
Credentials credentials = new Credentials();
Text service = new Text("singleTokenFs");
MockFileSystem fs = createFileSystemForServiceName(service);
Token<?> token = mock(Token.class);
credentials.addToken(service, token);
fs.addDelegationTokens(renewer, credentials);
verifyTokenFetch(fs, false);
assertEquals(1, credentials.numberOfTokens());
assertSame(token, credentials.getToken(service));
}
@Test
public void testFsWithChildTokens() throws Exception {
Credentials credentials = new Credentials();
Text service1 = new Text("singleTokenFs1");
Text service2 = new Text("singleTokenFs2");
MockFileSystem fs1 = createFileSystemForServiceName(service1);
MockFileSystem fs2 = createFileSystemForServiceName(service2);
MockFileSystem fs3 = createFileSystemForServiceName(null);
MockFileSystem multiFs =
createFileSystemForServiceName(null, fs1, fs2, fs3);
multiFs.addDelegationTokens(renewer, credentials);
verifyTokenFetch(multiFs, false); // has no tokens of its own, only child tokens
verifyTokenFetch(fs1, true);
verifyTokenFetch(fs2, true);
verifyTokenFetch(fs3, false);
assertEquals(2, credentials.numberOfTokens());
assertNotNull(credentials.getToken(service1));
assertNotNull(credentials.getToken(service2));
}
@Test
public void testFsWithDuplicateChildren() throws Exception {
Credentials credentials = new Credentials();
Text service = new Text("singleTokenFs1");
MockFileSystem fs = createFileSystemForServiceName(service);
MockFileSystem multiFs =
createFileSystemForServiceName(null, fs, new FilterFileSystem(fs));
multiFs.addDelegationTokens(renewer, credentials);
verifyTokenFetch(multiFs, false);
verifyTokenFetch(fs, true);
assertEquals(1, credentials.numberOfTokens());
assertNotNull(credentials.getToken(service));
}
@Test
public void testFsWithDuplicateChildrenTokenExists() throws Exception {
Credentials credentials = new Credentials();
Text service = new Text("singleTokenFs1");
Token<?> token = mock(Token.class);
credentials.addToken(service, token);
MockFileSystem fs = createFileSystemForServiceName(service);
MockFileSystem multiFs =
createFileSystemForServiceName(null, fs, new FilterFileSystem(fs));
multiFs.addDelegationTokens(renewer, credentials);
verifyTokenFetch(multiFs, false);
verifyTokenFetch(fs, false);
assertEquals(1, credentials.numberOfTokens());
assertSame(token, credentials.getToken(service));
}
@Test
public void testFsWithChildTokensOneExists() throws Exception {
Credentials credentials = new Credentials();
Text service1 = new Text("singleTokenFs1");
Text service2 = new Text("singleTokenFs2");
Token<?> token = mock(Token.class);
credentials.addToken(service2, token);
MockFileSystem fs1 = createFileSystemForServiceName(service1);
MockFileSystem fs2 = createFileSystemForServiceName(service2);
MockFileSystem fs3 = createFileSystemForServiceName(null);
MockFileSystem multiFs = createFileSystemForServiceName(null, fs1, fs2, fs3);
multiFs.addDelegationTokens(renewer, credentials);
verifyTokenFetch(multiFs, false);
verifyTokenFetch(fs1, true);
verifyTokenFetch(fs2, false); // we had added its token to credentials
verifyTokenFetch(fs3, false);
assertEquals(2, credentials.numberOfTokens());
assertNotNull(credentials.getToken(service1));
assertSame(token, credentials.getToken(service2));
}
@Test
public void testFsWithMyOwnAndChildTokens() throws Exception {
Credentials credentials = new Credentials();
Text service1 = new Text("singleTokenFs1");
Text service2 = new Text("singleTokenFs2");
Text myService = new Text("multiTokenFs");
Token<?> token = mock(Token.class);
credentials.addToken(service2, token);
MockFileSystem fs1 = createFileSystemForServiceName(service1);
MockFileSystem fs2 = createFileSystemForServiceName(service2);
MockFileSystem multiFs = createFileSystemForServiceName(myService, fs1, fs2);
multiFs.addDelegationTokens(renewer, credentials);
    verifyTokenFetch(multiFs, true); // fetches its own token and also those of its children
verifyTokenFetch(fs1, true);
verifyTokenFetch(fs2, false); // we had added its token to credentials
assertEquals(3, credentials.numberOfTokens());
assertNotNull(credentials.getToken(myService));
assertNotNull(credentials.getToken(service1));
assertNotNull(credentials.getToken(service2));
}
@Test
public void testFsWithMyOwnExistsAndChildTokens() throws Exception {
Credentials credentials = new Credentials();
Text service1 = new Text("singleTokenFs1");
Text service2 = new Text("singleTokenFs2");
Text myService = new Text("multiTokenFs");
Token<?> token = mock(Token.class);
credentials.addToken(myService, token);
MockFileSystem fs1 = createFileSystemForServiceName(service1);
MockFileSystem fs2 = createFileSystemForServiceName(service2);
MockFileSystem multiFs = createFileSystemForServiceName(myService, fs1, fs2);
multiFs.addDelegationTokens(renewer, credentials);
verifyTokenFetch(multiFs, false); // we had added its token to credentials
verifyTokenFetch(fs1, true);
verifyTokenFetch(fs2, true);
assertEquals(3, credentials.numberOfTokens());
assertSame(token, credentials.getToken(myService));
assertNotNull(credentials.getToken(service1));
assertNotNull(credentials.getToken(service2));
}
@Test
public void testFsWithNestedDuplicatesChildren() throws Exception {
Credentials credentials = new Credentials();
Text service1 = new Text("singleTokenFs1");
Text service2 = new Text("singleTokenFs2");
Text service4 = new Text("singleTokenFs4");
Text multiService = new Text("multiTokenFs");
Token<?> token2 = mock(Token.class);
credentials.addToken(service2, token2);
MockFileSystem fs1 = createFileSystemForServiceName(service1);
MockFileSystem fs1B = createFileSystemForServiceName(service1);
MockFileSystem fs2 = createFileSystemForServiceName(service2);
MockFileSystem fs3 = createFileSystemForServiceName(null);
MockFileSystem fs4 = createFileSystemForServiceName(service4);
    // now let's get dirty! ensure duplicate tokens aren't fetched even when
    // repeated and duplicated in a nested fs. fs4 is a real test of the
    // drill down: multi-filter-multi-filter-filter-fs4.
MockFileSystem multiFs = createFileSystemForServiceName(multiService,
fs1, fs1B, fs2, fs2, new FilterFileSystem(fs3),
new FilterFileSystem(new FilterFileSystem(fs4)));
MockFileSystem superMultiFs = createFileSystemForServiceName(null,
fs1, fs1B, fs1, new FilterFileSystem(fs3), new FilterFileSystem(multiFs));
superMultiFs.addDelegationTokens(renewer, credentials);
verifyTokenFetch(superMultiFs, false); // does not have its own token
verifyTokenFetch(multiFs, true); // has its own token
verifyTokenFetch(fs1, true);
verifyTokenFetch(fs2, false); // we had added its token to credentials
verifyTokenFetch(fs3, false); // has no tokens
verifyTokenFetch(fs4, true);
assertEquals(4, credentials.numberOfTokens()); //fs1+fs2+fs4+multifs (fs3=0)
assertNotNull(credentials.getToken(service1));
assertNotNull(credentials.getToken(service2));
assertSame(token2, credentials.getToken(service2));
assertNotNull(credentials.getToken(multiService));
assertNotNull(credentials.getToken(service4));
}
public static MockFileSystem createFileSystemForServiceName(
final Text service, final FileSystem... children) throws IOException {
final MockFileSystem fs = new MockFileSystem();
final MockFileSystem mockFs = fs.getRawFileSystem();
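    // Only a file system that "owns" a service advertises a canonical service
    // name and hands out delegation tokens; otherwise both stubs are skipped.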
if (service != null) {
when(mockFs.getCanonicalServiceName()).thenReturn(service.toString());
when(mockFs.getDelegationToken(any(String.class))).thenAnswer(
new Answer<Token<?>>() {
@Override
public Token<?> answer(InvocationOnMock invocation) throws Throwable {
Token<?> token = new Token<TokenIdentifier>();
token.setService(service);
return token;
}
});
}
when(mockFs.getChildFileSystems()).thenReturn(children);
return fs;
}
  // check that the canonical service name was requested, that a token was
  // requested only when expected, and that the child file systems were queried
private void verifyTokenFetch(MockFileSystem fs, boolean expected) throws IOException {
verify(fs.getRawFileSystem(), atLeast(1)).getCanonicalServiceName();
if (expected) {
verify(fs.getRawFileSystem()).getDelegationToken(renewer);
} else {
verify(fs.getRawFileSystem(), never()).getDelegationToken(any(String.class));
}
verify(fs.getRawFileSystem(), atLeast(1)).getChildFileSystems();
}
}
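// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original test class): how a client
// typically drives FileSystem#addDelegationTokens. Tokens already present in
// the Credentials cache are reused rather than fetched again, which is the
// behavior the tests above verify. The class and method names here are
// hypothetical.
class DelegationTokenUsageSketch {
  static void collectTokens(FileSystem fs, String renewer) throws IOException {
    Credentials creds = new Credentials();
    // fetches tokens for fs and all of its child file systems, skipping any
    // service whose token is already present in creds
    fs.addDelegationTokens(renewer, creds);
    for (Token<?> t : creds.getAllTokens()) {
      System.out.println("collected token for service: " + t.getService());
    }
  }
}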
| 11,219 | 39.071429 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Shell;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.HashSet;
import java.util.Set;
import static org.junit.Assert.*;
/**
* This test class checks basic operations with {@link HarFileSystem} including
* various initialization cases, getters, and modification methods.
*
* NB: to run this test from an IDE make sure the folder
* "hadoop-common-project/hadoop-common/src/main/resources/" is added as a
* source path. This will allow the system to pick up the "core-default.xml" and
* "META-INF/services/..." resources from the class-path in the runtime.
*/
public class TestHarFileSystemBasics {
private static final String ROOT_PATH = System.getProperty("test.build.data",
"build/test/data");
private static final Path rootPath;
static {
String root = new Path(new File(ROOT_PATH).getAbsolutePath(), "localfs")
.toUri().getPath();
// Strip drive specifier on Windows, which would make the HAR URI invalid and
// cause tests to fail.
if (Shell.WINDOWS) {
root = root.substring(root.indexOf(':') + 1);
}
rootPath = new Path(root);
}
// NB: .har suffix is necessary
private static final Path harPath = new Path(rootPath, "path1/path2/my.har");
private FileSystem localFileSystem;
private HarFileSystem harFileSystem;
private Configuration conf;
/*
   * creates and returns a fully initialized HarFileSystem
*/
private HarFileSystem createHarFileSystem(final Configuration conf)
throws Exception {
localFileSystem = FileSystem.getLocal(conf);
localFileSystem.initialize(new URI("file:///"), conf);
localFileSystem.mkdirs(rootPath);
localFileSystem.mkdirs(harPath);
final Path indexPath = new Path(harPath, "_index");
final Path masterIndexPath = new Path(harPath, "_masterindex");
localFileSystem.createNewFile(indexPath);
assertTrue(localFileSystem.exists(indexPath));
localFileSystem.createNewFile(masterIndexPath);
assertTrue(localFileSystem.exists(masterIndexPath));
writeVersionToMasterIndexImpl(HarFileSystem.VERSION, masterIndexPath);
final HarFileSystem harFileSystem = new HarFileSystem(localFileSystem);
final URI uri = new URI("har://" + harPath.toString());
harFileSystem.initialize(uri, conf);
return harFileSystem;
}
private HarFileSystem createHarFileSystem(final Configuration conf, Path aHarPath)
throws Exception {
localFileSystem.mkdirs(aHarPath);
final Path indexPath = new Path(aHarPath, "_index");
final Path masterIndexPath = new Path(aHarPath, "_masterindex");
localFileSystem.createNewFile(indexPath);
assertTrue(localFileSystem.exists(indexPath));
localFileSystem.createNewFile(masterIndexPath);
assertTrue(localFileSystem.exists(masterIndexPath));
writeVersionToMasterIndexImpl(HarFileSystem.VERSION, masterIndexPath);
final HarFileSystem harFileSystem = new HarFileSystem(localFileSystem);
final URI uri = new URI("har://" + aHarPath.toString());
harFileSystem.initialize(uri, conf);
return harFileSystem;
}
private void writeVersionToMasterIndexImpl(int version, Path masterIndexPath) throws IOException {
// write Har version into the master index:
final FSDataOutputStream fsdos = localFileSystem.create(masterIndexPath);
try {
String versionString = version + "\n";
fsdos.write(versionString.getBytes("UTF-8"));
fsdos.flush();
} finally {
fsdos.close();
}
}
@Before
public void before() throws Exception {
final File rootDirIoFile = new File(rootPath.toUri().getPath());
rootDirIoFile.mkdirs();
if (!rootDirIoFile.exists()) {
throw new IOException("Failed to create temp directory ["
+ rootDirIoFile.getAbsolutePath() + "]");
}
// create Har to test:
conf = new Configuration();
harFileSystem = createHarFileSystem(conf);
}
@After
public void after() throws Exception {
// close Har FS:
final FileSystem harFS = harFileSystem;
if (harFS != null) {
harFS.close();
harFileSystem = null;
}
// cleanup: delete all the temporary files:
final File rootDirIoFile = new File(rootPath.toUri().getPath());
if (rootDirIoFile.exists()) {
FileUtil.fullyDelete(rootDirIoFile);
}
if (rootDirIoFile.exists()) {
throw new IOException("Failed to delete temp directory ["
+ rootDirIoFile.getAbsolutePath() + "]");
}
}
// ======== Positive tests:
@Test
public void testPositiveHarFileSystemBasics() throws Exception {
// check Har version:
assertEquals(HarFileSystem.VERSION, harFileSystem.getHarVersion());
// check Har URI:
final URI harUri = harFileSystem.getUri();
assertEquals(harPath.toUri().getPath(), harUri.getPath());
assertEquals("har", harUri.getScheme());
// check Har home path:
final Path homePath = harFileSystem.getHomeDirectory();
assertEquals(harPath.toUri().getPath(), homePath.toUri().getPath());
// check working directory:
final Path workDirPath0 = harFileSystem.getWorkingDirectory();
assertEquals(homePath, workDirPath0);
    // check that it's impossible to reset the working directory
// (#setWorkingDirectory should have no effect):
harFileSystem.setWorkingDirectory(new Path("/foo/bar"));
assertEquals(workDirPath0, harFileSystem.getWorkingDirectory());
}
@Test
public void testPositiveNewHarFsOnTheSameUnderlyingFs() throws Exception {
// Init 2nd har file system on the same underlying FS, so the
// metadata gets reused:
final HarFileSystem hfs = new HarFileSystem(localFileSystem);
final URI uri = new URI("har://" + harPath.toString());
hfs.initialize(uri, new Configuration());
// the metadata should be reused from cache:
    assertSame(hfs.getMetadata(), harFileSystem.getMetadata());
}
@Test
public void testPositiveLruMetadataCacheFs() throws Exception {
// Init 2nd har file system on the same underlying FS, so the
// metadata gets reused:
HarFileSystem hfs = new HarFileSystem(localFileSystem);
URI uri = new URI("har://" + harPath.toString());
hfs.initialize(uri, new Configuration());
// the metadata should be reused from cache:
    assertSame(hfs.getMetadata(), harFileSystem.getMetadata());
    // Create more hars, until the cache is full + 1; the last creation
    // should evict the first entry from the cache
for (int i = 0; i <= hfs.METADATA_CACHE_ENTRIES_DEFAULT; i++) {
Path p = new Path(rootPath, "path1/path2/my" + i +".har");
createHarFileSystem(conf, p);
}
// The first entry should not be in the cache anymore:
hfs = new HarFileSystem(localFileSystem);
uri = new URI("har://" + harPath.toString());
hfs.initialize(uri, new Configuration());
    assertNotSame(hfs.getMetadata(), harFileSystem.getMetadata());
}
@Test
public void testPositiveInitWithoutUnderlyingFS() throws Exception {
// Init HarFS with no constructor arg, so that the underlying FS object
// is created on demand or got from cache in #initialize() method.
final HarFileSystem hfs = new HarFileSystem();
final URI uri = new URI("har://" + harPath.toString());
hfs.initialize(uri, new Configuration());
}
@Test
public void testPositiveListFilesNotEndInColon() throws Exception {
    // re-initialize the har file system with a host name and make sure the
    // qualified path name does not append ":" at the end of the host name
final URI uri = new URI("har://file-localhost" + harPath.toString());
harFileSystem.initialize(uri, conf);
Path p1 = new Path("har://file-localhost" + harPath.toString());
Path p2 = harFileSystem.makeQualified(p1);
assertTrue(p2.toUri().toString().startsWith("har://file-localhost/"));
}
@Test
public void testListLocatedStatus() throws Exception {
String testHarPath = this.getClass().getResource("/test.har").getPath();
URI uri = new URI("har://" + testHarPath);
HarFileSystem hfs = new HarFileSystem(localFileSystem);
hfs.initialize(uri, new Configuration());
// test.har has the following contents:
// dir1/1.txt
// dir1/2.txt
Set<String> expectedFileNames = new HashSet<String>();
expectedFileNames.add("1.txt");
expectedFileNames.add("2.txt");
// List contents of dir, and ensure we find all expected files
Path path = new Path("dir1");
RemoteIterator<LocatedFileStatus> fileList = hfs.listLocatedStatus(path);
while (fileList.hasNext()) {
String fileName = fileList.next().getPath().getName();
assertTrue(fileName + " not in expected files list", expectedFileNames.contains(fileName));
expectedFileNames.remove(fileName);
}
assertEquals("Didn't find all of the expected file names: " + expectedFileNames,
0, expectedFileNames.size());
}
@Test
public void testMakeQualifiedPath() throws Exception {
// Construct a valid har file system path with authority that
// contains userinfo and port. The userinfo and port are useless
// in local fs uri. They are only used to verify har file system
// can correctly preserve the information for the underlying file system.
String harPathWithUserinfo = "har://file-user:passwd@localhost:80"
+ harPath.toUri().getPath().toString();
Path path = new Path(harPathWithUserinfo);
Path qualifiedPath = path.getFileSystem(conf).makeQualified(path);
assertTrue(String.format(
"The qualified path (%s) did not match the expected path (%s).",
qualifiedPath.toString(), harPathWithUserinfo),
qualifiedPath.toString().equals(harPathWithUserinfo));
}
// ========== Negative:
@Test
public void testNegativeInitWithoutIndex() throws Exception {
// delete the index file:
final Path indexPath = new Path(harPath, "_index");
localFileSystem.delete(indexPath, false);
// now init the HarFs:
final HarFileSystem hfs = new HarFileSystem(localFileSystem);
final URI uri = new URI("har://" + harPath.toString());
try {
hfs.initialize(uri, new Configuration());
Assert.fail("Exception expected.");
} catch (IOException ioe) {
// ok, expected.
}
}
@Test
public void testNegativeGetHarVersionOnNotInitializedFS() throws Exception {
final HarFileSystem hfs = new HarFileSystem(localFileSystem);
try {
int version = hfs.getHarVersion();
Assert.fail("Exception expected, but got a Har version " + version + ".");
} catch (IOException ioe) {
// ok, expected.
}
}
@Test
public void testNegativeInitWithAnUnsupportedVersion() throws Exception {
    // NB: should wait at least 1 second to ensure the timestamp of the master
    // index will change upon the writing, because Linux seems to update the
    // file modification time with 1 second accuracy:
Thread.sleep(1000);
// write an unsupported version:
writeVersionToMasterIndexImpl(7777, new Path(harPath, "_masterindex"));
// init the Har:
final HarFileSystem hfs = new HarFileSystem(localFileSystem);
// the metadata should *not* be reused from cache:
    assertNotSame(hfs.getMetadata(), harFileSystem.getMetadata());
final URI uri = new URI("har://" + harPath.toString());
try {
hfs.initialize(uri, new Configuration());
Assert.fail("IOException expected.");
} catch (IOException ioe) {
// ok, expected.
}
}
@Test
public void testNegativeHarFsModifications() throws Exception {
// all the modification methods of HarFS must lead to IOE.
final Path fooPath = new Path(rootPath, "foo/bar");
localFileSystem.createNewFile(fooPath);
try {
harFileSystem.create(fooPath, new FsPermission("+rwx"), true, 1024,
(short) 88, 1024, null);
Assert.fail("IOException expected.");
} catch (IOException ioe) {
// ok, expected.
}
try {
harFileSystem.setReplication(fooPath, (short) 55);
Assert.fail("IOException expected.");
} catch (IOException ioe) {
// ok, expected.
}
try {
harFileSystem.delete(fooPath, true);
Assert.fail("IOException expected.");
} catch (IOException ioe) {
// ok, expected.
}
try {
harFileSystem.mkdirs(fooPath, new FsPermission("+rwx"));
Assert.fail("IOException expected.");
} catch (IOException ioe) {
// ok, expected.
}
final Path indexPath = new Path(harPath, "_index");
try {
harFileSystem.copyFromLocalFile(false, indexPath, fooPath);
Assert.fail("IOException expected.");
} catch (IOException ioe) {
// ok, expected.
}
try {
harFileSystem.startLocalOutput(fooPath, indexPath);
Assert.fail("IOException expected.");
} catch (IOException ioe) {
// ok, expected.
}
try {
harFileSystem.completeLocalOutput(fooPath, indexPath);
Assert.fail("IOException expected.");
} catch (IOException ioe) {
// ok, expected.
}
try {
harFileSystem.setOwner(fooPath, "user", "group");
Assert.fail("IOException expected.");
} catch (IOException ioe) {
// ok, expected.
}
try {
harFileSystem.setPermission(fooPath, new FsPermission("+x"));
Assert.fail("IOException expected.");
} catch (IOException ioe) {
// ok, expected.
}
}
@Test
public void testHarFsWithoutAuthority() throws Exception {
final URI uri = harFileSystem.getUri();
Assert.assertNull("har uri authority not null: " + uri, uri.getAuthority());
FileContext.getFileContext(uri, conf);
}
}
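// ---------------------------------------------------------------------------
// Hypothetical usage sketch (not part of the original test class): opening
// and reading an archived entry through an initialized HarFileSystem. The
// HAR URI and entry path below are illustrative assumptions.
class HarReadSketch {
  static void readFirstByte(Configuration conf, String harUri) throws Exception {
    HarFileSystem hfs = new HarFileSystem();
    hfs.initialize(new URI(harUri), conf);  // e.g. "har:///user/me/my.har"
    FSDataInputStream in = hfs.open(new Path("dir1/1.txt"));
    try {
      System.out.println("first byte: " + in.read());
    } finally {
      in.close();
      hfs.close();
    }
  }
}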
| 14,685 | 34.90709 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.apache.hadoop.fs.FileContextTestHelper.readFile;
import static org.apache.hadoop.fs.FileContextTestHelper.writeFile;
import static org.junit.Assert.assertTrue;
import java.util.Arrays;
import org.apache.hadoop.util.StringUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* <p>
* A collection of Util tests for the {@link FileContext#util()}.
* This test should be used for testing an instance of {@link FileContext#util()}
* that has been initialized to a specific default FileSystem such a
* LocalFileSystem, HDFS,S3, etc.
* </p>
* <p>
* To test a given {@link FileSystem} implementation create a subclass of this
* test and override {@link #setUp()} to initialize the <code>fc</code>
* {@link FileContext} instance variable.
*
* </p>
*/
public abstract class FileContextUtilBase {
protected final FileContextTestHelper fileContextTestHelper = new FileContextTestHelper();
protected FileContext fc;
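  // Raise FileSystem logging to DEBUG for these tests; a failure to adjust
  // the log level (e.g. a different log binding) is reported but non-fatal.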
{
try {
((org.apache.commons.logging.impl.Log4JLogger)FileSystem.LOG).getLogger()
.setLevel(org.apache.log4j.Level.DEBUG);
} catch(Exception e) {
System.out.println("Cannot change log level\n"
+ StringUtils.stringifyException(e));
}
}
@Before
public void setUp() throws Exception {
fc.mkdir(fileContextTestHelper.getTestRootPath(fc), FileContext.DEFAULT_PERM, true);
}
@After
public void tearDown() throws Exception {
fc.delete(fileContextTestHelper.getTestRootPath(fc), true);
}
@Test
public void testFcCopy() throws Exception{
final String ts = "some random text";
Path file1 = fileContextTestHelper.getTestRootPath(fc, "file1");
Path file2 = fileContextTestHelper.getTestRootPath(fc, "file2");
writeFile(fc, file1, ts.getBytes());
assertTrue(fc.util().exists(file1));
fc.util().copy(file1, file2);
// verify that newly copied file2 exists
assertTrue("Failed to copy file2 ", fc.util().exists(file2));
// verify that file2 contains test string
assertTrue("Copied files does not match ",Arrays.equals(ts.getBytes(),
readFile(fc,file2,ts.getBytes().length)));
}
@Test
public void testRecursiveFcCopy() throws Exception {
final String ts = "some random text";
Path dir1 = fileContextTestHelper.getTestRootPath(fc, "dir1");
Path dir2 = fileContextTestHelper.getTestRootPath(fc, "dir2");
Path file1 = new Path(dir1, "file1");
fc.mkdir(dir1, null, false);
writeFile(fc, file1, ts.getBytes());
assertTrue(fc.util().exists(file1));
Path file2 = new Path(dir2, "file1");
fc.util().copy(dir1, dir2);
// verify that newly copied file2 exists
assertTrue("Failed to copy file2 ", fc.util().exists(file2));
// verify that file2 contains test string
assertTrue("Copied files does not match ",Arrays.equals(ts.getBytes(),
readFile(fc,file2,ts.getBytes().length)));
}
}
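// ---------------------------------------------------------------------------
// Example of the subclassing pattern described in the class javadoc above: a
// concrete test binds fc to a specific file system before the base setup
// runs. This sketch targets the local file system; the class name is
// illustrative, not part of the original source.
class FcLocalFsUtilSketch extends FileContextUtilBase {
  @Override
  @Before
  public void setUp() throws Exception {
    fc = FileContext.getLocalFSFileContext();  // initialize fc first
    super.setUp();                             // then create the test root dir
  }
}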
| 3,774 | 33.633028 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.junit.*;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.OutputStream;
import java.net.InetAddress;
import java.net.URI;
import java.io.PrintWriter;
import java.net.URISyntaxException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.jar.Attributes;
import java.util.jar.JarFile;
import java.util.jar.Manifest;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import org.apache.tools.tar.TarEntry;
import org.apache.tools.tar.TarOutputStream;
import static org.junit.Assert.*;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestFileUtil {
private static final Log LOG = LogFactory.getLog(TestFileUtil.class);
private static final String TEST_ROOT_DIR = System.getProperty(
"test.build.data", "/tmp") + "/fu";
private static final File TEST_DIR = new File(TEST_ROOT_DIR);
private static final String FILE = "x";
private static final String LINK = "y";
private static final String DIR = "dir";
private final File del = new File(TEST_DIR, "del");
private final File tmp = new File(TEST_DIR, "tmp");
private final File dir1 = new File(del, DIR + "1");
private final File dir2 = new File(del, DIR + "2");
private final File partitioned = new File(TEST_DIR, "partitioned");
private InetAddress inet1;
private InetAddress inet2;
private InetAddress inet3;
private InetAddress inet4;
private InetAddress inet5;
private InetAddress inet6;
private URI uri1;
private URI uri2;
private URI uri3;
private URI uri4;
private URI uri5;
private URI uri6;
private FileSystem fs1;
private FileSystem fs2;
private FileSystem fs3;
private FileSystem fs4;
private FileSystem fs5;
private FileSystem fs6;
/**
* Creates multiple directories for testing.
*
* Contents of them are
* dir:tmp:
* file: x
* dir:del:
* file: x
* dir: dir1 : file:x
* dir: dir2 : file:x
* link: y to tmp/x
* link: tmpDir to tmp
* dir:partitioned:
* file: part-r-00000, contents: "foo"
* file: part-r-00001, contents: "bar"
*/
@Ignore
private void setupDirs() throws IOException {
Assert.assertFalse(del.exists());
Assert.assertFalse(tmp.exists());
Assert.assertFalse(partitioned.exists());
del.mkdirs();
tmp.mkdirs();
partitioned.mkdirs();
new File(del, FILE).createNewFile();
File tmpFile = new File(tmp, FILE);
tmpFile.createNewFile();
// create directories
dir1.mkdirs();
dir2.mkdirs();
new File(dir1, FILE).createNewFile();
new File(dir2, FILE).createNewFile();
// create a symlink to file
File link = new File(del, LINK);
FileUtil.symLink(tmpFile.toString(), link.toString());
// create a symlink to dir
File linkDir = new File(del, "tmpDir");
FileUtil.symLink(tmp.toString(), linkDir.toString());
Assert.assertEquals(5, del.listFiles().length);
// create files in partitioned directories
createFile(partitioned, "part-r-00000", "foo");
createFile(partitioned, "part-r-00001", "bar");
// create a cycle using symlinks. Cycles should be handled
FileUtil.symLink(del.toString(), dir1.toString() + "/cycle");
}
/**
* Creates a new file in the specified directory, with the specified name and
* the specified file contents. This method will add a newline terminator to
* the end of the contents string in the destination file.
* @param directory File non-null destination directory.
* @param name String non-null file name.
* @param contents String non-null file contents.
* @throws IOException if an I/O error occurs.
*/
private File createFile(File directory, String name, String contents)
throws IOException {
File newFile = new File(directory, name);
PrintWriter pw = new PrintWriter(newFile);
try {
pw.println(contents);
}
finally {
pw.close();
}
return newFile;
}
@Test (timeout = 30000)
public void testListFiles() throws IOException {
setupDirs();
//Test existing files case
File[] files = FileUtil.listFiles(partitioned);
Assert.assertEquals(2, files.length);
//Test existing directory with no files case
File newDir = new File(tmp.getPath(),"test");
newDir.mkdir();
Assert.assertTrue("Failed to create test dir", newDir.exists());
files = FileUtil.listFiles(newDir);
Assert.assertEquals(0, files.length);
newDir.delete();
Assert.assertFalse("Failed to delete test dir", newDir.exists());
//Test non-existing directory case, this throws
//IOException
try {
files = FileUtil.listFiles(newDir);
Assert.fail("IOException expected on listFiles() for non-existent dir "
+ newDir.toString());
} catch(IOException ioe) {
//Expected an IOException
}
}
@Test (timeout = 30000)
public void testListAPI() throws IOException {
setupDirs();
//Test existing files case
String[] files = FileUtil.list(partitioned);
Assert.assertEquals("Unexpected number of pre-existing files", 2, files.length);
//Test existing directory with no files case
File newDir = new File(tmp.getPath(),"test");
newDir.mkdir();
Assert.assertTrue("Failed to create test dir", newDir.exists());
files = FileUtil.list(newDir);
Assert.assertEquals("New directory unexpectedly contains files", 0, files.length);
newDir.delete();
Assert.assertFalse("Failed to delete test dir", newDir.exists());
//Test non-existing directory case, this throws
//IOException
try {
files = FileUtil.list(newDir);
Assert.fail("IOException expected on list() for non-existent dir "
+ newDir.toString());
} catch(IOException ioe) {
//Expected an IOException
}
}
@Before
public void before() throws IOException {
cleanupImpl();
}
@After
public void tearDown() throws IOException {
cleanupImpl();
}
private void cleanupImpl() throws IOException {
FileUtil.fullyDelete(del, true);
Assert.assertTrue(!del.exists());
FileUtil.fullyDelete(tmp, true);
Assert.assertTrue(!tmp.exists());
FileUtil.fullyDelete(partitioned, true);
Assert.assertTrue(!partitioned.exists());
}
@Test (timeout = 30000)
public void testFullyDelete() throws IOException {
setupDirs();
boolean ret = FileUtil.fullyDelete(del);
Assert.assertTrue(ret);
Assert.assertFalse(del.exists());
validateTmpDir();
}
/**
* Tests if fullyDelete deletes
* (a) symlink to file only and not the file pointed to by symlink.
* (b) symlink to dir only and not the dir pointed to by symlink.
* @throws IOException
*/
@Test (timeout = 30000)
public void testFullyDeleteSymlinks() throws IOException {
setupDirs();
File link = new File(del, LINK);
Assert.assertEquals(5, del.list().length);
// Since tmpDir is symlink to tmp, fullyDelete(tmpDir) should not
// delete contents of tmp. See setupDirs for details.
boolean ret = FileUtil.fullyDelete(link);
Assert.assertTrue(ret);
Assert.assertFalse(link.exists());
Assert.assertEquals(4, del.list().length);
validateTmpDir();
File linkDir = new File(del, "tmpDir");
// Since tmpDir is symlink to tmp, fullyDelete(tmpDir) should not
// delete contents of tmp. See setupDirs for details.
ret = FileUtil.fullyDelete(linkDir);
Assert.assertTrue(ret);
Assert.assertFalse(linkDir.exists());
Assert.assertEquals(3, del.list().length);
validateTmpDir();
}
/**
* Tests if fullyDelete deletes
* (a) dangling symlink to file properly
* (b) dangling symlink to directory properly
* @throws IOException
*/
@Test (timeout = 30000)
public void testFullyDeleteDanglingSymlinks() throws IOException {
setupDirs();
// delete the directory tmp to make tmpDir a dangling link to dir tmp and
// to make y as a dangling link to file tmp/x
boolean ret = FileUtil.fullyDelete(tmp);
Assert.assertTrue(ret);
Assert.assertFalse(tmp.exists());
// dangling symlink to file
File link = new File(del, LINK);
Assert.assertEquals(5, del.list().length);
// Even though 'y' is dangling symlink to file tmp/x, fullyDelete(y)
// should delete 'y' properly.
ret = FileUtil.fullyDelete(link);
Assert.assertTrue(ret);
Assert.assertEquals(4, del.list().length);
// dangling symlink to directory
File linkDir = new File(del, "tmpDir");
// Even though tmpDir is dangling symlink to tmp, fullyDelete(tmpDir) should
// delete tmpDir properly.
ret = FileUtil.fullyDelete(linkDir);
Assert.assertTrue(ret);
Assert.assertEquals(3, del.list().length);
}
@Test (timeout = 30000)
public void testFullyDeleteContents() throws IOException {
setupDirs();
boolean ret = FileUtil.fullyDeleteContents(del);
Assert.assertTrue(ret);
Assert.assertTrue(del.exists());
Assert.assertEquals(0, del.listFiles().length);
validateTmpDir();
}
private void validateTmpDir() {
Assert.assertTrue(tmp.exists());
Assert.assertEquals(1, tmp.listFiles().length);
Assert.assertTrue(new File(tmp, FILE).exists());
}
private final File xSubDir = new File(del, "xSubDir");
private final File xSubSubDir = new File(xSubDir, "xSubSubDir");
private final File ySubDir = new File(del, "ySubDir");
private static final String file1Name = "file1";
private final File file2 = new File(xSubDir, "file2");
private final File file22 = new File(xSubSubDir, "file22");
private final File file3 = new File(ySubDir, "file3");
private final File zlink = new File(del, "zlink");
/**
* Creates a directory which can not be deleted completely.
*
* Directory structure. The naming is important in that {@link MyFile}
* is used to return them in alphabetical order when listed.
*
* del(+w)
* |
* .---------------------------------------,
* | | | |
* file1(!w) xSubDir(-rwx) ySubDir(+w) zlink
* | | |
* | file2(-rwx) file3
* |
* xSubSubDir(-rwx)
* |
* file22(-rwx)
*
* @throws IOException
*/
private void setupDirsAndNonWritablePermissions() throws IOException {
Assert.assertFalse("The directory del should not have existed!",
del.exists());
del.mkdirs();
new MyFile(del, file1Name).createNewFile();
// "file1" is non-deletable by default, see MyFile.delete().
xSubDir.mkdirs();
file2.createNewFile();
xSubSubDir.mkdirs();
file22.createNewFile();
revokePermissions(file22);
revokePermissions(xSubSubDir);
revokePermissions(file2);
revokePermissions(xSubDir);
ySubDir.mkdirs();
file3.createNewFile();
Assert.assertFalse("The directory tmp should not have existed!",
tmp.exists());
tmp.mkdirs();
File tmpFile = new File(tmp, FILE);
tmpFile.createNewFile();
FileUtil.symLink(tmpFile.toString(), zlink.toString());
}
private static void grantPermissions(final File f) {
FileUtil.setReadable(f, true);
FileUtil.setWritable(f, true);
FileUtil.setExecutable(f, true);
}
private static void revokePermissions(final File f) {
FileUtil.setWritable(f, false);
FileUtil.setExecutable(f, false);
FileUtil.setReadable(f, false);
}
// Validates the return value.
// Validates the existence of the file "file1"
private void validateAndSetWritablePermissions(
final boolean expectedRevokedPermissionDirsExist, final boolean ret) {
grantPermissions(xSubDir);
grantPermissions(xSubSubDir);
Assert.assertFalse("The return value should have been false.", ret);
Assert.assertTrue("The file file1 should not have been deleted.",
new File(del, file1Name).exists());
Assert.assertEquals(
"The directory xSubDir *should* not have been deleted.",
expectedRevokedPermissionDirsExist, xSubDir.exists());
Assert.assertEquals("The file file2 *should* not have been deleted.",
expectedRevokedPermissionDirsExist, file2.exists());
Assert.assertEquals(
"The directory xSubSubDir *should* not have been deleted.",
expectedRevokedPermissionDirsExist, xSubSubDir.exists());
Assert.assertEquals("The file file22 *should* not have been deleted.",
expectedRevokedPermissionDirsExist, file22.exists());
Assert.assertFalse("The directory ySubDir should have been deleted.",
ySubDir.exists());
Assert.assertFalse("The link zlink should have been deleted.",
zlink.exists());
}
@Test (timeout = 30000)
public void testFailFullyDelete() throws IOException {
if(Shell.WINDOWS) {
// windows Dir.setWritable(false) does not work for directories
return;
}
LOG.info("Running test to verify failure of fullyDelete()");
setupDirsAndNonWritablePermissions();
boolean ret = FileUtil.fullyDelete(new MyFile(del));
validateAndSetWritablePermissions(true, ret);
}
@Test (timeout = 30000)
public void testFailFullyDeleteGrantPermissions() throws IOException {
setupDirsAndNonWritablePermissions();
boolean ret = FileUtil.fullyDelete(new MyFile(del), true);
// this time the directories with revoked permissions *should* be deleted:
validateAndSetWritablePermissions(false, ret);
}
/**
* Extend {@link File}. Same as {@link File} except for two things: (1) This
   * treats file1Name as a very special file which is not deletable
   * irrespective of its parent-dir's permissions, a peculiar file instance for
* testing. (2) It returns the files in alphabetically sorted order when
* listed.
*
*/
public static class MyFile extends File {
private static final long serialVersionUID = 1L;
public MyFile(File f) {
super(f.getAbsolutePath());
}
public MyFile(File parent, String child) {
super(parent, child);
}
/**
* Same as {@link File#delete()} except for file1Name which will never be
* deleted (hard-coded)
*/
@Override
public boolean delete() {
LOG.info("Trying to delete myFile " + getAbsolutePath());
boolean bool = false;
if (getName().equals(file1Name)) {
bool = false;
} else {
bool = super.delete();
}
if (bool) {
LOG.info("Deleted " + getAbsolutePath() + " successfully");
} else {
LOG.info("Cannot delete " + getAbsolutePath());
}
return bool;
}
/**
* Return the list of files in an alphabetically sorted order
*/
@Override
public File[] listFiles() {
final File[] files = super.listFiles();
if (files == null) {
return null;
}
List<File> filesList = Arrays.asList(files);
Collections.sort(filesList);
File[] myFiles = new MyFile[files.length];
int i=0;
for(File f : filesList) {
myFiles[i++] = new MyFile(f);
}
return myFiles;
}
}
@Test (timeout = 30000)
public void testFailFullyDeleteContents() throws IOException {
if(Shell.WINDOWS) {
// windows Dir.setWritable(false) does not work for directories
return;
}
LOG.info("Running test to verify failure of fullyDeleteContents()");
setupDirsAndNonWritablePermissions();
boolean ret = FileUtil.fullyDeleteContents(new MyFile(del));
validateAndSetWritablePermissions(true, ret);
}
@Test (timeout = 30000)
public void testFailFullyDeleteContentsGrantPermissions() throws IOException {
setupDirsAndNonWritablePermissions();
boolean ret = FileUtil.fullyDeleteContents(new MyFile(del), true);
// this time the directories with revoked permissions *should* be deleted:
validateAndSetWritablePermissions(false, ret);
}
@Test (timeout = 30000)
public void testCopyMergeSingleDirectory() throws IOException {
setupDirs();
boolean copyMergeResult = copyMerge("partitioned", "tmp/merged");
Assert.assertTrue("Expected successful copyMerge result.", copyMergeResult);
File merged = new File(TEST_DIR, "tmp/merged");
Assert.assertTrue("File tmp/merged must exist after copyMerge.",
merged.exists());
BufferedReader rdr = new BufferedReader(new FileReader(merged));
try {
Assert.assertEquals("Line 1 of merged file must contain \"foo\".",
"foo", rdr.readLine());
Assert.assertEquals("Line 2 of merged file must contain \"bar\".",
"bar", rdr.readLine());
Assert.assertNull("Expected end of file reading merged file.",
rdr.readLine());
}
finally {
rdr.close();
}
}
/**
* Calls FileUtil.copyMerge using the specified source and destination paths.
* Both source and destination are assumed to be on the local file system.
* The call will not delete source on completion and will not add an
* additional string between files.
* @param src String non-null source path.
* @param dst String non-null destination path.
* @return boolean true if the call to FileUtil.copyMerge was successful.
* @throws IOException if an I/O error occurs.
*/
private boolean copyMerge(String src, String dst)
throws IOException {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
final boolean result;
try {
Path srcPath = new Path(TEST_ROOT_DIR, src);
Path dstPath = new Path(TEST_ROOT_DIR, dst);
boolean deleteSource = false;
String addString = null;
result = FileUtil.copyMerge(fs, srcPath, fs, dstPath, deleteSource, conf,
addString);
}
finally {
fs.close();
}
return result;
}
/**
* Test that getDU is able to handle cycles caused due to symbolic links
* and that directory sizes are not added to the final calculated size
* @throws IOException
*/
@Test (timeout = 30000)
public void testGetDU() throws Exception {
setupDirs();
long du = FileUtil.getDU(TEST_DIR);
// Only two files (in partitioned). Each has 3 characters + system-specific
// line separator.
final long expected = 2 * (3 + System.getProperty("line.separator").length());
Assert.assertEquals(expected, du);
// target file does not exist:
final File doesNotExist = new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog");
long duDoesNotExist = FileUtil.getDU(doesNotExist);
assertEquals(0, duDoesNotExist);
// target file is not a directory:
File notADirectory = new File(partitioned, "part-r-00000");
long duNotADirectoryActual = FileUtil.getDU(notADirectory);
long duNotADirectoryExpected = 3 + System.getProperty("line.separator").length();
assertEquals(duNotADirectoryExpected, duNotADirectoryActual);
try {
// one of target files is not accessible, but the containing directory
// is accessible:
try {
FileUtil.chmod(notADirectory.getAbsolutePath(), "0000");
} catch (InterruptedException ie) {
// should never happen since that method never throws InterruptedException.
assertNull(ie);
}
assertFalse(FileUtil.canRead(notADirectory));
final long du3 = FileUtil.getDU(partitioned);
assertEquals(expected, du3);
// some target files and containing directory are not accessible:
try {
FileUtil.chmod(partitioned.getAbsolutePath(), "0000");
} catch (InterruptedException ie) {
// should never happen since that method never throws InterruptedException.
assertNull(ie);
}
assertFalse(FileUtil.canRead(partitioned));
final long du4 = FileUtil.getDU(partitioned);
assertEquals(0, du4);
} finally {
// Restore the permissions so that we can delete the folder
// in @After method:
FileUtil.chmod(partitioned.getAbsolutePath(), "0777", true/*recursive*/);
}
}
@Test (timeout = 30000)
public void testUnTar() throws IOException {
setupDirs();
// make a simple tar:
final File simpleTar = new File(del, FILE);
OutputStream os = new FileOutputStream(simpleTar);
TarOutputStream tos = new TarOutputStream(os);
try {
TarEntry te = new TarEntry("/bar/foo");
byte[] data = "some-content".getBytes("UTF-8");
te.setSize(data.length);
tos.putNextEntry(te);
tos.write(data);
tos.closeEntry();
tos.flush();
tos.finish();
} finally {
tos.close();
}
// successfully untar it into an existing dir:
FileUtil.unTar(simpleTar, tmp);
// check result:
assertTrue(new File(tmp, "/bar/foo").exists());
assertEquals(12, new File(tmp, "/bar/foo").length());
final File regularFile = new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog");
regularFile.createNewFile();
assertTrue(regularFile.exists());
try {
FileUtil.unTar(simpleTar, regularFile);
assertTrue("An IOException expected.", false);
} catch (IOException ioe) {
// okay
}
}
@Test (timeout = 30000)
public void testReplaceFile() throws IOException {
setupDirs();
final File srcFile = new File(tmp, "src");
// src exists, and target does not exist:
srcFile.createNewFile();
assertTrue(srcFile.exists());
final File targetFile = new File(tmp, "target");
assertTrue(!targetFile.exists());
FileUtil.replaceFile(srcFile, targetFile);
assertTrue(!srcFile.exists());
assertTrue(targetFile.exists());
// src exists and target is a regular file:
srcFile.createNewFile();
assertTrue(srcFile.exists());
FileUtil.replaceFile(srcFile, targetFile);
assertTrue(!srcFile.exists());
assertTrue(targetFile.exists());
// src exists, and target is a non-empty directory:
srcFile.createNewFile();
assertTrue(srcFile.exists());
targetFile.delete();
targetFile.mkdirs();
File obstacle = new File(targetFile, "obstacle");
obstacle.createNewFile();
assertTrue(obstacle.exists());
assertTrue(targetFile.exists() && targetFile.isDirectory());
try {
FileUtil.replaceFile(srcFile, targetFile);
      Assert.fail("IOException expected.");
} catch (IOException ioe) {
// okay
}
// check up the post-condition: nothing is deleted:
assertTrue(srcFile.exists());
assertTrue(targetFile.exists() && targetFile.isDirectory());
assertTrue(obstacle.exists());
}
@Test (timeout = 30000)
public void testCreateLocalTempFile() throws IOException {
setupDirs();
final File baseFile = new File(tmp, "base");
File tmp1 = FileUtil.createLocalTempFile(baseFile, "foo", false);
File tmp2 = FileUtil.createLocalTempFile(baseFile, "foo", true);
assertFalse(tmp1.getAbsolutePath().equals(baseFile.getAbsolutePath()));
assertFalse(tmp2.getAbsolutePath().equals(baseFile.getAbsolutePath()));
assertTrue(tmp1.exists() && tmp2.exists());
assertTrue(tmp1.canWrite() && tmp2.canWrite());
assertTrue(tmp1.canRead() && tmp2.canRead());
tmp1.delete();
tmp2.delete();
assertTrue(!tmp1.exists() && !tmp2.exists());
}
@Test (timeout = 30000)
public void testUnZip() throws IOException {
    setupDirs();
    // make a simple zip:
final File simpleZip = new File(del, FILE);
OutputStream os = new FileOutputStream(simpleZip);
ZipOutputStream tos = new ZipOutputStream(os);
try {
ZipEntry ze = new ZipEntry("foo");
byte[] data = "some-content".getBytes("UTF-8");
ze.setSize(data.length);
tos.putNextEntry(ze);
tos.write(data);
tos.closeEntry();
tos.flush();
tos.finish();
} finally {
tos.close();
}
    // successfully unzip it into an existing dir:
FileUtil.unZip(simpleZip, tmp);
// check result:
assertTrue(new File(tmp, "foo").exists());
assertEquals(12, new File(tmp, "foo").length());
final File regularFile = new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog");
regularFile.createNewFile();
assertTrue(regularFile.exists());
try {
FileUtil.unZip(simpleZip, regularFile);
assertTrue("An IOException expected.", false);
} catch (IOException ioe) {
// okay
}
}
@Test (timeout = 30000)
/*
* Test method copy(FileSystem srcFS, Path src, File dst, boolean deleteSource, Configuration conf)
*/
public void testCopy5() throws IOException {
setupDirs();
URI uri = tmp.toURI();
Configuration conf = new Configuration();
FileSystem fs = FileSystem.newInstance(uri, conf);
final String content = "some-content";
File srcFile = createFile(tmp, "src", content);
Path srcPath = new Path(srcFile.toURI());
// copy regular file:
final File dest = new File(del, "dest");
boolean result = FileUtil.copy(fs, srcPath, dest, false, conf);
assertTrue(result);
assertTrue(dest.exists());
assertEquals(content.getBytes().length
+ System.getProperty("line.separator").getBytes().length, dest.length());
assertTrue(srcFile.exists()); // should not be deleted
// copy regular file, delete src:
dest.delete();
assertTrue(!dest.exists());
result = FileUtil.copy(fs, srcPath, dest, true, conf);
assertTrue(result);
assertTrue(dest.exists());
assertEquals(content.getBytes().length
+ System.getProperty("line.separator").getBytes().length, dest.length());
assertTrue(!srcFile.exists()); // should be deleted
// copy a dir:
dest.delete();
assertTrue(!dest.exists());
srcPath = new Path(partitioned.toURI());
result = FileUtil.copy(fs, srcPath, dest, true, conf);
assertTrue(result);
assertTrue(dest.exists() && dest.isDirectory());
File[] files = dest.listFiles();
assertTrue(files != null);
assertEquals(2, files.length);
for (File f: files) {
assertEquals(3
+ System.getProperty("line.separator").getBytes().length, f.length());
}
assertTrue(!partitioned.exists()); // should be deleted
}
@Test (timeout = 30000)
public void testStat2Paths1() {
assertNull(FileUtil.stat2Paths(null));
FileStatus[] fileStatuses = new FileStatus[0];
Path[] paths = FileUtil.stat2Paths(fileStatuses);
assertEquals(0, paths.length);
Path path1 = new Path("file://foo");
Path path2 = new Path("file://moo");
fileStatuses = new FileStatus[] {
new FileStatus(3, false, 0, 0, 0, path1),
new FileStatus(3, false, 0, 0, 0, path2)
};
paths = FileUtil.stat2Paths(fileStatuses);
assertEquals(2, paths.length);
assertEquals(paths[0], path1);
assertEquals(paths[1], path2);
}
@Test (timeout = 30000)
public void testStat2Paths2() {
Path defaultPath = new Path("file://default");
Path[] paths = FileUtil.stat2Paths(null, defaultPath);
assertEquals(1, paths.length);
assertEquals(defaultPath, paths[0]);
paths = FileUtil.stat2Paths(null, null);
assertTrue(paths != null);
assertEquals(1, paths.length);
assertEquals(null, paths[0]);
Path path1 = new Path("file://foo");
Path path2 = new Path("file://moo");
FileStatus[] fileStatuses = new FileStatus[] {
new FileStatus(3, false, 0, 0, 0, path1),
new FileStatus(3, false, 0, 0, 0, path2)
};
paths = FileUtil.stat2Paths(fileStatuses, defaultPath);
assertEquals(2, paths.length);
assertEquals(paths[0], path1);
assertEquals(paths[1], path2);
}
@Test (timeout = 30000)
public void testSymlink() throws Exception {
Assert.assertFalse(del.exists());
del.mkdirs();
byte[] data = "testSymLink".getBytes();
File file = new File(del, FILE);
File link = new File(del, "_link");
//write some data to the file
FileOutputStream os = new FileOutputStream(file);
os.write(data);
os.close();
//create the symlink
FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
//ensure that symlink length is correctly reported by Java
Assert.assertEquals(data.length, file.length());
Assert.assertEquals(data.length, link.length());
//ensure that we can read from link.
FileInputStream in = new FileInputStream(link);
long len = 0;
while (in.read() > 0) {
len++;
}
in.close();
Assert.assertEquals(data.length, len);
}
/**
* Test that rename on a symlink works as expected.
*/
@Test (timeout = 30000)
public void testSymlinkRenameTo() throws Exception {
Assert.assertFalse(del.exists());
del.mkdirs();
File file = new File(del, FILE);
file.createNewFile();
File link = new File(del, "_link");
// create the symlink
FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
Assert.assertTrue(file.exists());
Assert.assertTrue(link.exists());
File link2 = new File(del, "_link2");
// Rename the symlink
Assert.assertTrue(link.renameTo(link2));
// Make sure the file still exists
// (NOTE: this would fail on Java6 on Windows if we didn't
// copy the file in FileUtil#symlink)
Assert.assertTrue(file.exists());
Assert.assertTrue(link2.exists());
Assert.assertFalse(link.exists());
}
/**
* Test that deletion of a symlink works as expected.
*/
@Test (timeout = 30000)
public void testSymlinkDelete() throws Exception {
Assert.assertFalse(del.exists());
del.mkdirs();
File file = new File(del, FILE);
file.createNewFile();
File link = new File(del, "_link");
// create the symlink
FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
Assert.assertTrue(file.exists());
Assert.assertTrue(link.exists());
// make sure that deleting a symlink works properly
Assert.assertTrue(link.delete());
Assert.assertFalse(link.exists());
Assert.assertTrue(file.exists());
}
/**
* Test that length on a symlink works as expected.
*/
@Test (timeout = 30000)
public void testSymlinkLength() throws Exception {
Assert.assertFalse(del.exists());
del.mkdirs();
byte[] data = "testSymLinkData".getBytes();
File file = new File(del, FILE);
File link = new File(del, "_link");
// write some data to the file
FileOutputStream os = new FileOutputStream(file);
os.write(data);
os.close();
Assert.assertEquals(0, link.length());
// create the symlink
FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
// ensure that File#length returns the target file and link size
Assert.assertEquals(data.length, file.length());
Assert.assertEquals(data.length, link.length());
file.delete();
Assert.assertFalse(file.exists());
if (Shell.WINDOWS && !Shell.isJava7OrAbove()) {
// On Java6 on Windows, we copied the file
Assert.assertEquals(data.length, link.length());
} else {
// Otherwise, the target file size is zero
Assert.assertEquals(0, link.length());
}
link.delete();
Assert.assertFalse(link.exists());
}
private void doUntarAndVerify(File tarFile, File untarDir)
throws IOException {
if (untarDir.exists() && !FileUtil.fullyDelete(untarDir)) {
throw new IOException("Could not delete directory '" + untarDir + "'");
}
FileUtil.unTar(tarFile, untarDir);
String parentDir = untarDir.getCanonicalPath() + Path.SEPARATOR + "name";
File testFile = new File(parentDir + Path.SEPARATOR + "version");
Assert.assertTrue(testFile.exists());
Assert.assertTrue(testFile.length() == 0);
String imageDir = parentDir + Path.SEPARATOR + "image";
testFile = new File(imageDir + Path.SEPARATOR + "fsimage");
Assert.assertTrue(testFile.exists());
Assert.assertTrue(testFile.length() == 157);
String currentDir = parentDir + Path.SEPARATOR + "current";
testFile = new File(currentDir + Path.SEPARATOR + "fsimage");
Assert.assertTrue(testFile.exists());
Assert.assertTrue(testFile.length() == 4331);
testFile = new File(currentDir + Path.SEPARATOR + "edits");
Assert.assertTrue(testFile.exists());
Assert.assertTrue(testFile.length() == 1033);
testFile = new File(currentDir + Path.SEPARATOR + "fstime");
Assert.assertTrue(testFile.exists());
Assert.assertTrue(testFile.length() == 8);
}
@Test (timeout = 30000)
public void testUntar() throws IOException {
String tarGzFileName = System.getProperty("test.cache.data",
"build/test/cache") + "/test-untar.tgz";
String tarFileName = System.getProperty("test.cache.data",
"build/test/cache") + "/test-untar.tar";
String dataDir = System.getProperty("test.build.data", "build/test/data");
File untarDir = new File(dataDir, "untarDir");
doUntarAndVerify(new File(tarGzFileName), untarDir);
doUntarAndVerify(new File(tarFileName), untarDir);
}
@Test (timeout = 30000)
public void testCreateJarWithClassPath() throws Exception {
// setup test directory for files
Assert.assertFalse(tmp.exists());
Assert.assertTrue(tmp.mkdirs());
// create files expected to match a wildcard
List<File> wildcardMatches = Arrays.asList(new File(tmp, "wildcard1.jar"),
new File(tmp, "wildcard2.jar"), new File(tmp, "wildcard3.JAR"),
new File(tmp, "wildcard4.JAR"));
for (File wildcardMatch: wildcardMatches) {
Assert.assertTrue("failure creating file: " + wildcardMatch,
wildcardMatch.createNewFile());
}
// create non-jar files, which we expect to not be included in the classpath
Assert.assertTrue(new File(tmp, "text.txt").createNewFile());
Assert.assertTrue(new File(tmp, "executable.exe").createNewFile());
Assert.assertTrue(new File(tmp, "README").createNewFile());
// create classpath jar
String wildcardPath = tmp.getCanonicalPath() + File.separator + "*";
String nonExistentSubdir = tmp.getCanonicalPath() + Path.SEPARATOR + "subdir"
+ Path.SEPARATOR;
List<String> classPaths = Arrays.asList("", "cp1.jar", "cp2.jar", wildcardPath,
"cp3.jar", nonExistentSubdir);
String inputClassPath = StringUtils.join(File.pathSeparator, classPaths);
String[] jarCp = FileUtil.createJarWithClassPath(inputClassPath + File.pathSeparator + "unexpandedwildcard/*",
new Path(tmp.getCanonicalPath()), System.getenv());
String classPathJar = jarCp[0];
assertNotEquals("Unexpanded wildcard was not placed in extra classpath", jarCp[1].indexOf("unexpanded"), -1);
// verify classpath by reading manifest from jar file
JarFile jarFile = null;
try {
jarFile = new JarFile(classPathJar);
Manifest jarManifest = jarFile.getManifest();
Assert.assertNotNull(jarManifest);
Attributes mainAttributes = jarManifest.getMainAttributes();
Assert.assertNotNull(mainAttributes);
Assert.assertTrue(mainAttributes.containsKey(Attributes.Name.CLASS_PATH));
String classPathAttr = mainAttributes.getValue(Attributes.Name.CLASS_PATH);
Assert.assertNotNull(classPathAttr);
List<String> expectedClassPaths = new ArrayList<String>();
for (String classPath: classPaths) {
if (classPath.length() == 0) {
continue;
}
if (wildcardPath.equals(classPath)) {
// add wildcard matches
for (File wildcardMatch: wildcardMatches) {
expectedClassPaths.add(wildcardMatch.toURI().toURL()
.toExternalForm());
}
} else {
File fileCp = null;
if(!new Path(classPath).isAbsolute()) {
fileCp = new File(tmp, classPath);
}
else {
fileCp = new File(classPath);
}
if (nonExistentSubdir.equals(classPath)) {
// expect to maintain trailing path separator if present in input, even
// if directory doesn't exist yet
expectedClassPaths.add(fileCp.toURI().toURL()
.toExternalForm() + Path.SEPARATOR);
} else {
expectedClassPaths.add(fileCp.toURI().toURL()
.toExternalForm());
}
}
}
List<String> actualClassPaths = Arrays.asList(classPathAttr.split(" "));
Collections.sort(expectedClassPaths);
Collections.sort(actualClassPaths);
Assert.assertEquals(expectedClassPaths, actualClassPaths);
} finally {
if (jarFile != null) {
try {
jarFile.close();
} catch (IOException e) {
LOG.warn("exception closing jarFile: " + classPathJar, e);
}
}
}
}
@Ignore
public void setupCompareFs() {
// Set up Strings
String host1 = "1.2.3.4";
String host2 = "2.3.4.5";
int port1 = 7000;
int port2 = 7001;
String uris1 = "hdfs://" + host1 + ":" + Integer.toString(port1) + "/tmp/foo";
String uris2 = "hdfs://" + host1 + ":" + Integer.toString(port2) + "/tmp/foo";
String uris3 = "hdfs://" + host2 + ":" + Integer.toString(port2) + "/tmp/foo";
String uris4 = "hdfs://" + host2 + ":" + Integer.toString(port2) + "/tmp/foo";
String uris5 = "file:///" + host1 + ":" + Integer.toString(port1) + "/tmp/foo";
String uris6 = "hdfs:///" + host1 + "/tmp/foo";
// Set up URI objects
try {
uri1 = new URI(uris1);
uri2 = new URI(uris2);
uri3 = new URI(uris3);
uri4 = new URI(uris4);
uri5 = new URI(uris5);
uri6 = new URI(uris6);
    } catch (URISyntaxException use) {
      // Cannot happen: the URI strings above are hard-coded and well-formed.
    }
// Set up InetAddress
inet1 = mock(InetAddress.class);
when(inet1.getCanonicalHostName()).thenReturn(host1);
inet2 = mock(InetAddress.class);
when(inet2.getCanonicalHostName()).thenReturn(host1);
inet3 = mock(InetAddress.class);
when(inet3.getCanonicalHostName()).thenReturn(host2);
inet4 = mock(InetAddress.class);
when(inet4.getCanonicalHostName()).thenReturn(host2);
inet5 = mock(InetAddress.class);
when(inet5.getCanonicalHostName()).thenReturn(host1);
inet6 = mock(InetAddress.class);
when(inet6.getCanonicalHostName()).thenReturn(host1);
// Link of InetAddress to corresponding URI
try {
when(InetAddress.getByName(uris1)).thenReturn(inet1);
when(InetAddress.getByName(uris2)).thenReturn(inet2);
when(InetAddress.getByName(uris3)).thenReturn(inet3);
when(InetAddress.getByName(uris4)).thenReturn(inet4);
when(InetAddress.getByName(uris5)).thenReturn(inet5);
    } catch (UnknownHostException ue) {
      // Swallowed deliberately: plain Mockito cannot stub the static
      // InetAddress.getByName, so these lookups may fail; the compareFs tests
      // only rely on the mocked FileSystem URIs below.
    }
fs1 = mock(FileSystem.class);
when(fs1.getUri()).thenReturn(uri1);
fs2 = mock(FileSystem.class);
when(fs2.getUri()).thenReturn(uri2);
fs3 = mock(FileSystem.class);
when(fs3.getUri()).thenReturn(uri3);
fs4 = mock(FileSystem.class);
when(fs4.getUri()).thenReturn(uri4);
fs5 = mock(FileSystem.class);
when(fs5.getUri()).thenReturn(uri5);
fs6 = mock(FileSystem.class);
when(fs6.getUri()).thenReturn(uri6);
}
@Test
public void testCompareFsNull() throws Exception {
setupCompareFs();
    assertFalse(FileUtil.compareFs(null, fs1));
    assertFalse(FileUtil.compareFs(fs1, null));
}
@Test
public void testCompareFsDirectories() throws Exception {
setupCompareFs();
    assertTrue(FileUtil.compareFs(fs1, fs1));
    assertFalse(FileUtil.compareFs(fs1, fs2));
    assertFalse(FileUtil.compareFs(fs1, fs5));
    assertTrue(FileUtil.compareFs(fs3, fs4));
    assertFalse(FileUtil.compareFs(fs1, fs6));
}
}
| 41,025 | 33.074751 | 114 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegateToFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.net.URI;
import org.apache.commons.net.ftp.FTP;
import org.apache.hadoop.conf.Configuration;
import org.junit.Assert;
import org.junit.Test;
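/**
 * Verifies that an AbstractFileSystem backed by DelegateToFileSystem (ftp
 * here) fills in its scheme's default port, regardless of which default
 * file system is configured.
 */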
public class TestDelegateToFileSystem {
private static final String FTP_DUMMYHOST = "ftp://dummyhost";
private static final URI FTP_URI_NO_PORT = URI.create(FTP_DUMMYHOST);
private static final URI FTP_URI_WITH_PORT = URI.create(FTP_DUMMYHOST + ":"
+ FTP.DEFAULT_PORT);
private void testDefaultUriInternal(String defaultUri)
throws UnsupportedFileSystemException {
final Configuration conf = new Configuration();
FileSystem.setDefaultUri(conf, defaultUri);
final AbstractFileSystem ftpFs =
AbstractFileSystem.get(FTP_URI_NO_PORT, conf);
Assert.assertEquals(FTP_URI_WITH_PORT, ftpFs.getUri());
}
@Test
public void testDefaultURIwithOutPort() throws Exception {
testDefaultUriInternal("hdfs://dummyhost");
}
@Test
public void testDefaultURIwithPort() throws Exception {
testDefaultUriInternal("hdfs://dummyhost:8020");
}
}
| 1,884 | 34.566038 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestS3_LocalFileContextURI.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import org.apache.hadoop.conf.Configuration;
import org.junit.Before;
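/**
 * Runs the FileContextURIBase suite with an S3-backed fc1 against a local
 * fc2; assumes the test.fs.s3.name property points at a usable S3 URI.
 */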
public class TestS3_LocalFileContextURI extends FileContextURIBase {
@Override
@Before
public void setUp() throws Exception {
Configuration localConf = new Configuration();
fc2 = FileContext.getFileContext(localConf);
Configuration s3conf = new Configuration();
    s3conf.set(FS_DEFAULT_NAME_KEY, s3conf.get("test.fs.s3.name"));
fc1 = FileContext.getFileContext(s3conf);
}
}
| 1,407 | 35.102564 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemInitialization.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.conf.Configuration;
import java.io.IOException;
import java.net.URL;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestFileSystemInitialization {
/**
* Check if FileSystem can be properly initialized if URLStreamHandlerFactory
* is registered.
*/
@Test
public void testInitializationWithRegisteredStreamFactory() {
Configuration conf = new Configuration();
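    // The JVM allows URL.setURLStreamHandlerFactory to be called only once
    // per process; this test relies on performing that registration here.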
URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory(conf));
try {
FileSystem.getFileSystemClass("file", conf);
}
    catch (IOException ok) {
      // An IOException here is acceptable; this test only guards against the
      // initialization infinite-loop problem.
    }
}
}
| 1,554 | 32.085106 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Shell;
import static org.apache.hadoop.fs.FileSystemTestHelper.*;
import java.io.*;
import java.net.URI;
import java.util.Arrays;
import java.util.Random;
import static org.junit.Assert.*;
import static org.junit.Assume.assumeTrue;
import static org.mockito.Mockito.*;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
/**
* This class tests the local file system via the FileSystem abstraction.
*/
public class TestLocalFileSystem {
private static final String TEST_ROOT_DIR
= System.getProperty("test.build.data","build/test/data") + "/work-dir/localfs";
private final File base = new File(TEST_ROOT_DIR);
private final Path TEST_PATH = new Path(TEST_ROOT_DIR, "test-file");
private Configuration conf;
private LocalFileSystem fileSys;
private void cleanupFile(FileSystem fs, Path name) throws IOException {
assertTrue(fs.exists(name));
fs.delete(name, true);
assertTrue(!fs.exists(name));
}
@Before
public void setup() throws IOException {
conf = new Configuration(false);
conf.set("fs.file.impl", LocalFileSystem.class.getName());
fileSys = FileSystem.getLocal(conf);
fileSys.delete(new Path(TEST_ROOT_DIR), true);
}
@After
public void after() throws IOException {
FileUtil.setWritable(base, true);
FileUtil.fullyDelete(base);
assertTrue(!base.exists());
}
/**
* Test the capability of setting the working directory.
*/
@Test(timeout = 10000)
public void testWorkingDirectory() throws IOException {
Path origDir = fileSys.getWorkingDirectory();
Path subdir = new Path(TEST_ROOT_DIR, "new");
try {
// make sure it doesn't already exist
assertTrue(!fileSys.exists(subdir));
// make it and check for it
assertTrue(fileSys.mkdirs(subdir));
assertTrue(fileSys.isDirectory(subdir));
fileSys.setWorkingDirectory(subdir);
// create a directory and check for it
Path dir1 = new Path("dir1");
assertTrue(fileSys.mkdirs(dir1));
assertTrue(fileSys.isDirectory(dir1));
// delete the directory and make sure it went away
fileSys.delete(dir1, true);
assertTrue(!fileSys.exists(dir1));
// create files and manipulate them.
Path file1 = new Path("file1");
Path file2 = new Path("sub/file2");
String contents = writeFile(fileSys, file1, 1);
fileSys.copyFromLocalFile(file1, file2);
assertTrue(fileSys.exists(file1));
assertTrue(fileSys.isFile(file1));
cleanupFile(fileSys, file2);
fileSys.copyToLocalFile(file1, file2);
cleanupFile(fileSys, file2);
// try a rename
fileSys.rename(file1, file2);
assertTrue(!fileSys.exists(file1));
assertTrue(fileSys.exists(file2));
fileSys.rename(file2, file1);
// try reading a file
InputStream stm = fileSys.open(file1);
byte[] buffer = new byte[3];
int bytesRead = stm.read(buffer, 0, 3);
assertEquals(contents, new String(buffer, 0, bytesRead));
stm.close();
} finally {
fileSys.setWorkingDirectory(origDir);
}
}
/**
* test Syncable interface on raw local file system
* @throws IOException
*/
@Test(timeout = 1000)
public void testSyncable() throws IOException {
FileSystem fs = fileSys.getRawFileSystem();
Path file = new Path(TEST_ROOT_DIR, "syncable");
    FSDataOutputStream out = fs.create(file);
final int bytesWritten = 1;
byte[] expectedBuf = new byte[] {'0', '1', '2', '3'};
try {
out.write(expectedBuf, 0, 1);
out.hflush();
verifyFile(fs, file, bytesWritten, expectedBuf);
out.write(expectedBuf, bytesWritten, expectedBuf.length-bytesWritten);
out.hsync();
verifyFile(fs, file, expectedBuf.length, expectedBuf);
} finally {
out.close();
}
}
private void verifyFile(FileSystem fs, Path file, int bytesToVerify,
byte[] expectedBytes) throws IOException {
FSDataInputStream in = fs.open(file);
try {
byte[] readBuf = new byte[bytesToVerify];
in.readFully(readBuf, 0, bytesToVerify);
for (int i=0; i<bytesToVerify; i++) {
assertEquals(expectedBytes[i], readBuf[i]);
}
} finally {
in.close();
}
}
@Test(timeout = 10000)
public void testCopy() throws IOException {
Path src = new Path(TEST_ROOT_DIR, "dingo");
Path dst = new Path(TEST_ROOT_DIR, "yak");
writeFile(fileSys, src, 1);
assertTrue(FileUtil.copy(fileSys, src, fileSys, dst, true, false, conf));
assertTrue(!fileSys.exists(src) && fileSys.exists(dst));
assertTrue(FileUtil.copy(fileSys, dst, fileSys, src, false, false, conf));
assertTrue(fileSys.exists(src) && fileSys.exists(dst));
assertTrue(FileUtil.copy(fileSys, src, fileSys, dst, true, true, conf));
assertTrue(!fileSys.exists(src) && fileSys.exists(dst));
fileSys.mkdirs(src);
assertTrue(FileUtil.copy(fileSys, dst, fileSys, src, false, false, conf));
Path tmp = new Path(src, dst.getName());
assertTrue(fileSys.exists(tmp) && fileSys.exists(dst));
assertTrue(FileUtil.copy(fileSys, dst, fileSys, src, false, true, conf));
assertTrue(fileSys.delete(tmp, true));
fileSys.mkdirs(tmp);
try {
FileUtil.copy(fileSys, dst, fileSys, src, true, true, conf);
fail("Failed to detect existing dir");
} catch (IOException e) {
// Expected
}
}
@Test(timeout = 1000)
public void testHomeDirectory() throws IOException {
Path home = new Path(System.getProperty("user.home"))
.makeQualified(fileSys);
Path fsHome = fileSys.getHomeDirectory();
assertEquals(home, fsHome);
}
@Test(timeout = 1000)
public void testPathEscapes() throws IOException {
Path path = new Path(TEST_ROOT_DIR, "foo%bar");
writeFile(fileSys, path, 1);
FileStatus status = fileSys.getFileStatus(path);
assertEquals(path.makeQualified(fileSys), status.getPath());
cleanupFile(fileSys, path);
}
@Test(timeout = 1000)
public void testCreateFileAndMkdirs() throws IOException {
Path test_dir = new Path(TEST_ROOT_DIR, "test_dir");
Path test_file = new Path(test_dir, "file1");
assertTrue(fileSys.mkdirs(test_dir));
final int fileSize = new Random().nextInt(1 << 20) + 1;
writeFile(fileSys, test_file, fileSize);
{
//check FileStatus and ContentSummary
final FileStatus status = fileSys.getFileStatus(test_file);
Assert.assertEquals(fileSize, status.getLen());
final ContentSummary summary = fileSys.getContentSummary(test_dir);
Assert.assertEquals(fileSize, summary.getLength());
}
// creating dir over a file
Path bad_dir = new Path(test_file, "another_dir");
try {
fileSys.mkdirs(bad_dir);
fail("Failed to detect existing file in path");
} catch (ParentNotDirectoryException e) {
// Expected
}
try {
fileSys.mkdirs(null);
fail("Failed to detect null in mkdir arg");
} catch (IllegalArgumentException e) {
// Expected
}
}
/** Test deleting a file, directory, and non-existent path */
@Test(timeout = 1000)
public void testBasicDelete() throws IOException {
Path dir1 = new Path(TEST_ROOT_DIR, "dir1");
Path file1 = new Path(TEST_ROOT_DIR, "file1");
Path file2 = new Path(TEST_ROOT_DIR+"/dir1", "file2");
Path file3 = new Path(TEST_ROOT_DIR, "does-not-exist");
assertTrue(fileSys.mkdirs(dir1));
writeFile(fileSys, file1, 1);
writeFile(fileSys, file2, 1);
assertFalse("Returned true deleting non-existant path",
fileSys.delete(file3));
assertTrue("Did not delete file", fileSys.delete(file1));
assertTrue("Did not delete non-empty dir", fileSys.delete(dir1));
}
@Test(timeout = 1000)
public void testStatistics() throws Exception {
int fileSchemeCount = 0;
for (Statistics stats : FileSystem.getAllStatistics()) {
if (stats.getScheme().equals("file")) {
fileSchemeCount++;
}
}
assertEquals(1, fileSchemeCount);
}
@Test(timeout = 1000)
public void testHasFileDescriptor() throws IOException {
Path path = new Path(TEST_ROOT_DIR, "test-file");
writeFile(fileSys, path, 1);
BufferedFSInputStream bis = new BufferedFSInputStream(
new RawLocalFileSystem().new LocalFSFileInputStream(path), 1024);
assertNotNull(bis.getFileDescriptor());
bis.close();
}
@Test(timeout = 1000)
public void testListStatusWithColons() throws IOException {
assumeTrue(!Shell.WINDOWS);
File colonFile = new File(TEST_ROOT_DIR, "foo:bar");
colonFile.mkdirs();
FileStatus[] stats = fileSys.listStatus(new Path(TEST_ROOT_DIR));
assertEquals("Unexpected number of stats", 1, stats.length);
assertEquals("Bad path from stat", colonFile.getAbsolutePath(),
stats[0].getPath().toUri().getPath());
}
@Test
public void testListStatusReturnConsistentPathOnWindows() throws IOException {
assumeTrue(Shell.WINDOWS);
String dirNoDriveSpec = TEST_ROOT_DIR;
if (dirNoDriveSpec.charAt(1) == ':')
dirNoDriveSpec = dirNoDriveSpec.substring(2);
File file = new File(dirNoDriveSpec, "foo");
file.mkdirs();
FileStatus[] stats = fileSys.listStatus(new Path(dirNoDriveSpec));
assertEquals("Unexpected number of stats", 1, stats.length);
assertEquals("Bad path from stat", new Path(file.getPath()).toUri().getPath(),
stats[0].getPath().toUri().getPath());
}
@Test(timeout = 10000)
public void testReportChecksumFailure() throws IOException {
base.mkdirs();
assertTrue(base.exists() && base.isDirectory());
final File dir1 = new File(base, "dir1");
final File dir2 = new File(dir1, "dir2");
dir2.mkdirs();
assertTrue(dir2.exists() && FileUtil.canWrite(dir2));
final String dataFileName = "corruptedData";
final Path dataPath = new Path(new File(dir2, dataFileName).toURI());
final Path checksumPath = fileSys.getChecksumFile(dataPath);
final FSDataOutputStream fsdos = fileSys.create(dataPath);
try {
fsdos.writeUTF("foo");
} finally {
fsdos.close();
}
assertTrue(fileSys.pathToFile(dataPath).exists());
final long dataFileLength = fileSys.getFileStatus(dataPath).getLen();
assertTrue(dataFileLength > 0);
    // check that the checksum file is created and not empty:
assertTrue(fileSys.pathToFile(checksumPath).exists());
final long checksumFileLength = fileSys.getFileStatus(checksumPath).getLen();
assertTrue(checksumFileLength > 0);
// this is a hack to force the #reportChecksumFailure() method to stop
// climbing up at the 'base' directory and use 'dir1/bad_files' as the
// corrupted files storage:
FileUtil.setWritable(base, false);
FSDataInputStream dataFsdis = fileSys.open(dataPath);
FSDataInputStream checksumFsdis = fileSys.open(checksumPath);
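    // reportChecksumFailure returns whether retrying another replica might
    // help; the local file system has no replicas, so no retry is expected.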
    boolean retryIsNecessary = fileSys.reportChecksumFailure(
        dataPath, dataFsdis, 0, checksumFsdis, 0);
assertTrue(!retryIsNecessary);
// the data file should be moved:
assertTrue(!fileSys.pathToFile(dataPath).exists());
// the checksum file should be moved:
assertTrue(!fileSys.pathToFile(checksumPath).exists());
// check that the files exist in the new location where they were moved:
File[] dir1files = dir1.listFiles(new FileFilter() {
@Override
public boolean accept(File pathname) {
return pathname != null && !pathname.getName().equals("dir2");
}
});
assertTrue(dir1files != null);
assertTrue(dir1files.length == 1);
File badFilesDir = dir1files[0];
File[] badFiles = badFilesDir.listFiles();
assertTrue(badFiles != null);
assertTrue(badFiles.length == 2);
boolean dataFileFound = false;
boolean checksumFileFound = false;
for (File badFile: badFiles) {
if (badFile.getName().startsWith(dataFileName)) {
assertTrue(dataFileLength == badFile.length());
dataFileFound = true;
} else if (badFile.getName().contains(dataFileName + ".crc")) {
assertTrue(checksumFileLength == badFile.length());
checksumFileFound = true;
}
}
assertTrue(dataFileFound);
assertTrue(checksumFileFound);
}
private void checkTimesStatus(Path path,
long expectedModTime, long expectedAccTime) throws IOException {
FileStatus status = fileSys.getFileStatus(path);
assertEquals(expectedModTime, status.getModificationTime());
assertEquals(expectedAccTime, status.getAccessTime());
}
@Test(timeout = 1000)
public void testSetTimes() throws Exception {
Path path = new Path(TEST_ROOT_DIR, "set-times");
writeFile(fileSys, path, 1);
// test only to the nearest second, as the raw FS may not
// support millisecond timestamps
long newModTime = 12345000;
long newAccTime = 23456000;
FileStatus status = fileSys.getFileStatus(path);
assertTrue("check we're actually changing something", newModTime != status.getModificationTime());
assertTrue("check we're actually changing something", newAccTime != status.getAccessTime());
fileSys.setTimes(path, newModTime, newAccTime);
checkTimesStatus(path, newModTime, newAccTime);
newModTime = 34567000;
fileSys.setTimes(path, newModTime, -1);
checkTimesStatus(path, newModTime, newAccTime);
newAccTime = 45678000;
fileSys.setTimes(path, -1, newAccTime);
checkTimesStatus(path, newModTime, newAccTime);
}
/**
* Regression test for HADOOP-9307: BufferedFSInputStream returning
* wrong results after certain sequences of seeks and reads.
*/
@Test
public void testBufferedFSInputStream() throws IOException {
Configuration conf = new Configuration();
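    // Use the raw local FS with a small (4 KB) buffer so that reads exercise
    // the BufferedFSInputStream wrapper this regression test targets.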
conf.setClass("fs.file.impl", RawLocalFileSystem.class, FileSystem.class);
conf.setInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, 4096);
FileSystem fs = FileSystem.newInstance(conf);
byte[] buf = new byte[10*1024];
new Random().nextBytes(buf);
// Write random bytes to file
FSDataOutputStream stream = fs.create(TEST_PATH);
try {
stream.write(buf);
} finally {
stream.close();
}
Random r = new Random();
FSDataInputStream stm = fs.open(TEST_PATH);
// Record the sequence of seeks and reads which trigger a failure.
    int[] seeks = new int[10];
    int[] reads = new int[10];
try {
for (int i = 0; i < 1000; i++) {
int seekOff = r.nextInt(buf.length);
int toRead = r.nextInt(Math.min(buf.length - seekOff, 32000));
seeks[i % seeks.length] = seekOff;
reads[i % reads.length] = toRead;
verifyRead(stm, buf, seekOff, toRead);
}
} catch (AssertionError afe) {
StringBuilder sb = new StringBuilder();
sb.append("Sequence of actions:\n");
for (int j = 0; j < seeks.length; j++) {
sb.append("seek @ ").append(seeks[j]).append(" ")
.append("read ").append(reads[j]).append("\n");
}
System.err.println(sb.toString());
throw afe;
} finally {
stm.close();
}
}
/**
* Tests a simple rename of a directory.
*/
@Test
public void testRenameDirectory() throws IOException {
Path src = new Path(TEST_ROOT_DIR, "dir1");
Path dst = new Path(TEST_ROOT_DIR, "dir2");
fileSys.delete(src, true);
fileSys.delete(dst, true);
assertTrue(fileSys.mkdirs(src));
assertTrue(fileSys.rename(src, dst));
assertTrue(fileSys.exists(dst));
assertFalse(fileSys.exists(src));
}
/**
* Tests that renaming a directory replaces the destination if the destination
* is an existing empty directory.
*
* Before:
* /dir1
* /file1
* /file2
* /dir2
*
* After rename("/dir1", "/dir2"):
* /dir2
* /file1
* /file2
*/
@Test
public void testRenameReplaceExistingEmptyDirectory() throws IOException {
Path src = new Path(TEST_ROOT_DIR, "dir1");
Path dst = new Path(TEST_ROOT_DIR, "dir2");
fileSys.delete(src, true);
fileSys.delete(dst, true);
assertTrue(fileSys.mkdirs(src));
writeFile(fileSys, new Path(src, "file1"), 1);
writeFile(fileSys, new Path(src, "file2"), 1);
assertTrue(fileSys.mkdirs(dst));
assertTrue(fileSys.rename(src, dst));
assertTrue(fileSys.exists(dst));
assertTrue(fileSys.exists(new Path(dst, "file1")));
assertTrue(fileSys.exists(new Path(dst, "file2")));
assertFalse(fileSys.exists(src));
}
/**
* Tests that renaming a directory to an existing directory that is not empty
* results in a full copy of source to destination.
*
* Before:
* /dir1
* /dir2
* /dir3
* /file1
* /file2
*
* After rename("/dir1/dir2/dir3", "/dir1"):
* /dir1
* /dir3
* /file1
* /file2
*/
@Test
public void testRenameMoveToExistingNonEmptyDirectory() throws IOException {
Path src = new Path(TEST_ROOT_DIR, "dir1/dir2/dir3");
Path dst = new Path(TEST_ROOT_DIR, "dir1");
fileSys.delete(src, true);
fileSys.delete(dst, true);
assertTrue(fileSys.mkdirs(src));
writeFile(fileSys, new Path(src, "file1"), 1);
writeFile(fileSys, new Path(src, "file2"), 1);
assertTrue(fileSys.exists(dst));
assertTrue(fileSys.rename(src, dst));
assertTrue(fileSys.exists(dst));
assertTrue(fileSys.exists(new Path(dst, "dir3")));
assertTrue(fileSys.exists(new Path(dst, "dir3/file1")));
assertTrue(fileSys.exists(new Path(dst, "dir3/file2")));
assertFalse(fileSys.exists(src));
}
private void verifyRead(FSDataInputStream stm, byte[] fileContents,
int seekOff, int toRead) throws IOException {
byte[] out = new byte[toRead];
stm.seek(seekOff);
stm.readFully(out);
byte[] expected = Arrays.copyOfRange(fileContents, seekOff, seekOff+toRead);
if (!Arrays.equals(out, expected)) {
String s ="\nExpected: " +
StringUtils.byteToHexString(expected) +
"\ngot: " +
StringUtils.byteToHexString(out) +
"\noff=" + seekOff + " len=" + toRead;
fail(s);
}
}
@Test
public void testStripFragmentFromPath() throws Exception {
FileSystem fs = FileSystem.getLocal(new Configuration());
Path pathQualified = TEST_PATH.makeQualified(fs.getUri(),
fs.getWorkingDirectory());
Path pathWithFragment = new Path(
new URI(pathQualified.toString() + "#glacier"));
// Create test file with fragment
FileSystemTestHelper.createFile(fs, pathWithFragment);
Path resolved = fs.resolvePath(pathWithFragment);
assertEquals("resolvePath did not strip fragment from Path", pathQualified,
resolved);
}
@Test
public void testAppendSetsPosCorrectly() throws Exception {
FileSystem fs = fileSys.getRawFileSystem();
Path file = new Path(TEST_ROOT_DIR, "test-append");
fs.delete(file, true);
FSDataOutputStream out = fs.create(file);
try {
out.write("text1".getBytes());
} finally {
out.close();
}
// Verify the position
out = fs.append(file);
try {
assertEquals(5, out.getPos());
out.write("text2".getBytes());
} finally {
out.close();
}
// Verify the content
FSDataInputStream in = fs.open(file);
try {
byte[] buf = new byte[in.available()];
in.readFully(buf);
assertEquals("text1text2", new String(buf));
} finally {
in.close();
}
}
@Test
public void testFileStatusPipeFile() throws Exception {
RawLocalFileSystem origFs = new RawLocalFileSystem();
RawLocalFileSystem fs = spy(origFs);
Configuration conf = mock(Configuration.class);
fs.setConf(conf);
Whitebox.setInternalState(fs, "useDeprecatedFileStatus", false);
Path path = new Path("/foo");
File pipe = mock(File.class);
when(pipe.isFile()).thenReturn(false);
when(pipe.isDirectory()).thenReturn(false);
when(pipe.exists()).thenReturn(true);
FileStatus stat = mock(FileStatus.class);
doReturn(pipe).when(fs).pathToFile(path);
doReturn(stat).when(fs).getFileStatus(path);
FileStatus[] stats = fs.listStatus(path);
assertTrue(stats != null && stats.length == 1 && stats[0] == stat);
}
}
| 21,443 | 32.876777 | 103 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextCreateMkdir.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.junit.Before;
public class TestLocalFSFileContextCreateMkdir extends
FileContextCreateMkdirBaseTest {
@Override
@Before
public void setUp() throws Exception {
fc = FileContext.getLocalFSFileContext();
super.setUp();
}
}
| 1,090 | 32.060606 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextMainOperations.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.apache.hadoop.fs.FileContextTestHelper;
import org.apache.hadoop.fs.permission.FsPermission;
public class TestLocalFSFileContextMainOperations extends FileContextMainOperationsBaseTest {
@Override
@Before
public void setUp() throws Exception {
fc = FileContext.getLocalFSFileContext();
super.setUp();
}
static Path wd = null;
@Override
protected Path getDefaultWorkingDirectory() throws IOException {
if (wd == null)
wd = FileSystem.getLocal(new Configuration()).getWorkingDirectory();
return wd;
}
@Test
public void testFileContextNoCache() throws UnsupportedFileSystemException {
FileContext fc1 = FileContext.getLocalFSFileContext();
    Assert.assertNotSame(fc1, fc);
}
@Override
protected boolean listCorruptedBlocksSupported() {
return false;
}
@Test
public void testDefaultFilePermission() throws IOException {
Path file = fileContextTestHelper.getTestRootPath(fc,
"testDefaultFilePermission");
FileContextTestHelper.createFile(fc, file);
FsPermission expect = FileContext.FILE_DEFAULT_PERM.applyUMask(fc.getUMask());
Assert.assertEquals(expect, fc.getFileStatus(file)
.getPermission());
}
}
| 2,203 | 31.411765 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
import org.junit.Assert;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.apache.hadoop.fs.FileContextTestHelper.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
/**
* <p>
* A collection of permission tests for the {@link FileContext}.
* This test should be used for testing an instance of FileContext
 * that has been initialized to a specific default FileSystem such as a
 * LocalFileSystem, HDFS, S3, etc.
* </p>
* <p>
* To test a given {@link FileSystem} implementation create a subclass of this
 * test and implement {@link #getFileContext()} to supply the <code>fc</code>
 * {@link FileContext} instance variable.
*
 * Since this is a JUnit 4 test you can also do a single setup before
* the start of any tests.
* E.g.
* @BeforeClass public static void clusterSetupAtBegining()
* @AfterClass public static void ClusterShutdownAtEnd()
* </p>
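 * <p>
 * A minimal sketch of such a subclass (the class name here is illustrative):
 * <pre>
 * public class TestLocalFsPermission extends FileContextPermissionBase {
 *   protected FileContext getFileContext() throws Exception {
 *     return FileContext.getLocalFSFileContext();
 *   }
 * }
 * </pre>
 * </p>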
*/
public abstract class FileContextPermissionBase {
{
try {
((org.apache.commons.logging.impl.Log4JLogger)FileSystem.LOG).getLogger()
.setLevel(org.apache.log4j.Level.DEBUG);
}
catch(Exception e) {
System.out.println("Cannot change log level\n"
+ StringUtils.stringifyException(e));
}
}
protected FileContextTestHelper fileContextTestHelper;
protected FileContext fc;
protected FileContextTestHelper getFileContextHelper() {
return new FileContextTestHelper();
}
protected abstract FileContext getFileContext() throws Exception;
@Before
public void setUp() throws Exception {
fileContextTestHelper = getFileContextHelper();
fc = getFileContext();
fc.mkdir(fileContextTestHelper.getTestRootPath(fc), FileContext.DEFAULT_PERM, true);
}
@After
public void tearDown() throws Exception {
fc.delete(fileContextTestHelper.getTestRootPath(fc), true);
}
private void cleanupFile(FileContext fc, Path name) throws IOException {
Assert.assertTrue(exists(fc, name));
fc.delete(name, true);
Assert.assertTrue(!exists(fc, name));
}
@Test
public void testCreatePermission() throws IOException {
if (Path.WINDOWS) {
System.out.println("Cannot run test for Windows");
return;
}
String filename = "foo";
Path f = fileContextTestHelper.getTestRootPath(fc, filename);
fileContextTestHelper.createFile(fc, filename);
doFilePermissionCheck(FileContext.FILE_DEFAULT_PERM.applyUMask(fc.getUMask()),
fc.getFileStatus(f).getPermission());
}
@Test
public void testSetPermission() throws IOException {
if (Path.WINDOWS) {
System.out.println("Cannot run test for Windows");
return;
}
String filename = "foo";
Path f = fileContextTestHelper.getTestRootPath(fc, filename);
createFile(fc, f);
try {
// create files and manipulate them.
FsPermission all = new FsPermission((short)0777);
FsPermission none = new FsPermission((short)0);
fc.setPermission(f, none);
doFilePermissionCheck(none, fc.getFileStatus(f).getPermission());
fc.setPermission(f, all);
doFilePermissionCheck(all, fc.getFileStatus(f).getPermission());
}
finally {cleanupFile(fc, f);}
}
@Test
public void testSetOwner() throws IOException {
if (Path.WINDOWS) {
System.out.println("Cannot run test for Windows");
return;
}
String filename = "bar";
Path f = fileContextTestHelper.getTestRootPath(fc, filename);
createFile(fc, f);
List<String> groups = null;
try {
groups = getGroups();
System.out.println(filename + ": " + fc.getFileStatus(f).getPermission());
}
catch(IOException e) {
System.out.println(StringUtils.stringifyException(e));
System.out.println("Cannot run test");
return;
}
if (groups == null || groups.size() < 1) {
System.out.println("Cannot run test: need at least one group. groups="
+ groups);
return;
}
// create files and manipulate them.
try {
String g0 = groups.get(0);
fc.setOwner(f, null, g0);
Assert.assertEquals(g0, fc.getFileStatus(f).getGroup());
if (groups.size() > 1) {
String g1 = groups.get(1);
fc.setOwner(f, null, g1);
Assert.assertEquals(g1, fc.getFileStatus(f).getGroup());
} else {
System.out.println("Not testing changing the group since user " +
"belongs to only one group.");
}
try {
fc.setOwner(f, null, null);
fail("Exception expected.");
} catch (IllegalArgumentException iae) {
// okay
}
}
finally {cleanupFile(fc, f);}
}
@Test
public void testUgi() throws IOException, InterruptedException {
UserGroupInformation otherUser = UserGroupInformation
.createRemoteUser("otherUser");
FileContext newFc = otherUser.doAs(new PrivilegedExceptionAction<FileContext>() {
@Override
public FileContext run() throws Exception {
FileContext newFc = FileContext.getFileContext();
return newFc;
}
});
assertEquals("otherUser",newFc.getUgi().getUserName());
}
static List<String> getGroups() throws IOException {
List<String> a = new ArrayList<String>();
String s = Shell.execCommand(Shell.getGroupsCommand());
for(StringTokenizer t = new StringTokenizer(s); t.hasMoreTokens(); ) {
a.add(t.nextToken());
}
return a;
}
void doFilePermissionCheck(FsPermission expectedPerm, FsPermission actualPerm) {
Assert.assertEquals(expectedPerm.applyUMask(getFileMask()), actualPerm);
}
/*
   * Override the method below if the file system being tested masks out
   * certain permission bits in file modes.
*/
static final FsPermission FILE_MASK_ZERO = new FsPermission((short) 0);
FsPermission getFileMask() {
return FILE_MASK_ZERO;
}
}
| 7,146 | 30.073913 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestWrapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.FileNotFoundException;
import java.util.EnumSet;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Options.CreateOpts.BlockSize;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.AccessControlException;
import org.junit.Assert;
/**
* Helper class for unit tests.
*/
public final class FileContextTestWrapper extends FSTestWrapper {
private final FileContext fc;
public FileContextTestWrapper(FileContext context) {
this(context, null);
}
public FileContextTestWrapper(FileContext context, String rootDir) {
super(rootDir);
this.fc = context;
}
public FSTestWrapper getLocalFSWrapper()
throws UnsupportedFileSystemException {
return new FileContextTestWrapper(FileContext.getLocalFSFileContext());
}
public Path getDefaultWorkingDirectory() throws IOException {
return getTestRootPath("/user/" + System.getProperty("user.name"))
.makeQualified(fc.getDefaultFileSystem().getUri(),
fc.getWorkingDirectory());
}
/*
* Create files with numBlocks blocks each with block size blockSize.
*/
public long createFile(Path path, int numBlocks, CreateOpts... options)
throws IOException {
BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
: DEFAULT_BLOCK_SIZE;
FSDataOutputStream out =
fc.create(path, EnumSet.of(CreateFlag.CREATE), options);
byte[] data = getFileData(numBlocks, blockSize);
out.write(data, 0, data.length);
out.close();
return data.length;
}
public long createFile(Path path, int numBlocks, int blockSize)
throws IOException {
return createFile(path, numBlocks, CreateOpts.blockSize(blockSize),
CreateOpts.createParent());
}
public long createFile(Path path) throws IOException {
return createFile(path, DEFAULT_NUM_BLOCKS, CreateOpts.createParent());
}
public long createFile(String name) throws IOException {
Path path = getTestRootPath(name);
return createFile(path);
}
public long createFileNonRecursive(String name) throws IOException {
Path path = getTestRootPath(name);
return createFileNonRecursive(path);
}
public long createFileNonRecursive(Path path) throws IOException {
return createFile(path, DEFAULT_NUM_BLOCKS, CreateOpts.donotCreateParent());
}
public void appendToFile(Path path, int numBlocks, CreateOpts... options)
throws IOException {
BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
: DEFAULT_BLOCK_SIZE;
    FSDataOutputStream out = fc.create(path, EnumSet.of(CreateFlag.APPEND));
byte[] data = getFileData(numBlocks, blockSize);
out.write(data, 0, data.length);
out.close();
}
public boolean exists(Path p) throws IOException {
return fc.util().exists(p);
}
public boolean isFile(Path p) throws IOException {
try {
return fc.getFileStatus(p).isFile();
} catch (FileNotFoundException e) {
return false;
}
}
public boolean isDir(Path p) throws IOException {
try {
return fc.getFileStatus(p).isDirectory();
} catch (FileNotFoundException e) {
return false;
}
}
public boolean isSymlink(Path p) throws IOException {
try {
return fc.getFileLinkStatus(p).isSymlink();
} catch (FileNotFoundException e) {
return false;
}
}
public void writeFile(Path path, byte b[]) throws IOException {
FSDataOutputStream out =
fc.create(path,EnumSet.of(CreateFlag.CREATE), CreateOpts.createParent());
out.write(b);
out.close();
}
public byte[] readFile(Path path, int len) throws IOException {
DataInputStream dis = fc.open(path);
byte[] buffer = new byte[len];
IOUtils.readFully(dis, buffer, 0, len);
dis.close();
return buffer;
}
public FileStatus containsPath(Path path, FileStatus[] dirList)
throws IOException {
    for (int i = 0; i < dirList.length; i++) {
if (path.equals(dirList[i].getPath()))
return dirList[i];
}
return null;
}
public FileStatus containsPath(String path, FileStatus[] dirList)
throws IOException {
return containsPath(new Path(path), dirList);
}
public void checkFileStatus(String path, fileType expectedType)
throws IOException {
FileStatus s = fc.getFileStatus(new Path(path));
Assert.assertNotNull(s);
if (expectedType == fileType.isDir) {
Assert.assertTrue(s.isDirectory());
} else if (expectedType == fileType.isFile) {
Assert.assertTrue(s.isFile());
} else if (expectedType == fileType.isSymlink) {
Assert.assertTrue(s.isSymlink());
}
Assert.assertEquals(fc.makeQualified(new Path(path)), s.getPath());
}
public void checkFileLinkStatus(String path, fileType expectedType)
throws IOException {
FileStatus s = fc.getFileLinkStatus(new Path(path));
Assert.assertNotNull(s);
if (expectedType == fileType.isDir) {
Assert.assertTrue(s.isDirectory());
} else if (expectedType == fileType.isFile) {
Assert.assertTrue(s.isFile());
} else if (expectedType == fileType.isSymlink) {
Assert.assertTrue(s.isSymlink());
}
Assert.assertEquals(fc.makeQualified(new Path(path)), s.getPath());
}
//
// FileContext wrappers
//
@Override
public Path makeQualified(Path path) {
return fc.makeQualified(path);
}
@Override
public void mkdir(Path dir, FsPermission permission, boolean createParent)
throws AccessControlException, FileAlreadyExistsException,
FileNotFoundException, ParentNotDirectoryException,
UnsupportedFileSystemException, IOException {
fc.mkdir(dir, permission, createParent);
}
@Override
public boolean delete(Path f, boolean recursive)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
return fc.delete(f, recursive);
}
@Override
public FileStatus getFileLinkStatus(Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
return fc.getFileLinkStatus(f);
}
@Override
public void createSymlink(Path target, Path link, boolean createParent)
throws AccessControlException, FileAlreadyExistsException,
FileNotFoundException, ParentNotDirectoryException,
UnsupportedFileSystemException, IOException {
fc.createSymlink(target, link, createParent);
}
@Override
public void setWorkingDirectory(Path newWDir) throws IOException {
fc.setWorkingDirectory(newWDir);
}
@Override
public Path getWorkingDirectory() {
return fc.getWorkingDirectory();
}
@Override
public FileStatus getFileStatus(Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
return fc.getFileStatus(f);
}
@Override
public FSDataOutputStream create(Path f, EnumSet<CreateFlag> createFlag,
CreateOpts... opts) throws AccessControlException,
FileAlreadyExistsException, FileNotFoundException,
ParentNotDirectoryException, UnsupportedFileSystemException, IOException {
return fc.create(f, createFlag, opts);
}
@Override
public FSDataInputStream open(Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
return fc.open(f);
}
@Override
public boolean setReplication(final Path f, final short replication)
throws AccessControlException, FileNotFoundException,
IOException {
return fc.setReplication(f, replication);
}
@Override
public Path getLinkTarget(Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
return fc.getLinkTarget(f);
}
@Override
public void rename(Path src, Path dst, Rename... options)
throws AccessControlException, FileAlreadyExistsException,
FileNotFoundException, ParentNotDirectoryException,
UnsupportedFileSystemException, IOException {
fc.rename(src, dst, options);
}
@Override
public BlockLocation[] getFileBlockLocations(Path f, long start, long len)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
return fc.getFileBlockLocations(f, start, len);
}
@Override
public FileChecksum getFileChecksum(Path f) throws AccessControlException,
FileNotFoundException, IOException {
return fc.getFileChecksum(f);
}
@Override
public RemoteIterator<FileStatus> listStatusIterator(Path f)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
return fc.listStatus(f);
}
@Override
public void setPermission(final Path f, final FsPermission permission)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
fc.setPermission(f, permission);
}
@Override
public void setOwner(final Path f, final String username,
final String groupname) throws AccessControlException,
UnsupportedFileSystemException, FileNotFoundException,
IOException {
fc.setOwner(f, username, groupname);
}
@Override
public void setTimes(Path f, long mtime, long atime)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
fc.setTimes(f, mtime, atime);
}
@Override
public FileStatus[] listStatus(Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
return fc.util().listStatus(f);
}
@Override
public FileStatus[] globStatus(Path pathPattern, PathFilter filter)
throws IOException {
return fc.util().globStatus(pathPattern, filter);
}
}
| 10,959 | 31.235294 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.*;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.junit.Test;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
public class TestFileStatus {
private static final Log LOG =
LogFactory.getLog(TestFileStatus.class);
/** Values for creating {@link FileStatus} in some tests */
static final int LENGTH = 1;
static final int REPLICATION = 2;
static final long BLKSIZE = 3;
static final long MTIME = 4;
static final long ATIME = 5;
static final String OWNER = "owner";
static final String GROUP = "group";
static final FsPermission PERMISSION = FsPermission.valueOf("-rw-rw-rw-");
static final Path PATH = new Path("path");
/**
* Check that the write and readField methods work correctly.
*/
@Test
public void testFileStatusWritable() throws Exception {
FileStatus[] tests = {
new FileStatus(1,false,5,3,4,5,null,"","",new Path("/a/b")),
new FileStatus(0,false,1,2,3,new Path("/")),
new FileStatus(1,false,5,3,4,5,null,"","",new Path("/a/b"))
};
LOG.info("Writing FileStatuses to a ByteArrayOutputStream");
// Writing input list to ByteArrayOutputStream
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutput out = new DataOutputStream(baos);
for (FileStatus fs : tests) {
fs.write(out);
}
LOG.info("Creating ByteArrayInputStream object");
DataInput in =
new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
LOG.info("Testing if read objects are equal to written ones");
FileStatus dest = new FileStatus();
int iterator = 0;
for (FileStatus fs : tests) {
dest.readFields(in);
assertEquals("Different FileStatuses in iteration " + iterator,
dest, fs);
iterator++;
}
}
/**
* Check that the full parameter constructor works correctly.
*/
@Test
public void constructorFull() throws IOException {
boolean isdir = false;
Path symlink = new Path("symlink");
FileStatus fileStatus = new FileStatus(LENGTH, isdir, REPLICATION, BLKSIZE,
MTIME, ATIME, PERMISSION, OWNER, GROUP, symlink, PATH);
validateAccessors(fileStatus, LENGTH, isdir, REPLICATION, BLKSIZE, MTIME,
ATIME, PERMISSION, OWNER, GROUP, symlink, PATH);
}
/**
* Check that the non-symlink constructor works correctly.
*/
@Test
public void constructorNoSymlink() throws IOException {
boolean isdir = true;
FileStatus fileStatus = new FileStatus(LENGTH, isdir, REPLICATION, BLKSIZE,
MTIME, ATIME, PERMISSION, OWNER, GROUP, PATH);
validateAccessors(fileStatus, LENGTH, isdir, REPLICATION, BLKSIZE, MTIME,
ATIME, PERMISSION, OWNER, GROUP, null, PATH);
}
/**
* Check that the constructor without owner, group and permissions works
* correctly.
*/
@Test
public void constructorNoOwner() throws IOException {
boolean isdir = true;
FileStatus fileStatus = new FileStatus(LENGTH, isdir,
REPLICATION, BLKSIZE, MTIME, PATH);
validateAccessors(fileStatus, LENGTH, isdir, REPLICATION, BLKSIZE, MTIME,
0, FsPermission.getDirDefault(), "", "", null, PATH);
}
/**
* Check that the no parameter constructor works correctly.
*/
@Test
public void constructorBlank() throws IOException {
FileStatus fileStatus = new FileStatus();
validateAccessors(fileStatus, 0, false, 0, 0, 0,
0, FsPermission.getFileDefault(), "", "", null, null);
}
/**
* Check that FileStatus are equal if their paths are equal.
*/
@Test
public void testEquals() {
Path path = new Path("path");
FileStatus fileStatus1 = new FileStatus(1, true, 1, 1, 1, 1,
FsPermission.valueOf("-rw-rw-rw-"), "one", "one", null, path);
FileStatus fileStatus2 = new FileStatus(2, true, 2, 2, 2, 2,
FsPermission.valueOf("---x--x--x"), "two", "two", null, path);
assertEquals(fileStatus1, fileStatus2);
}
/**
* Check that FileStatus are not equal if their paths are not equal.
*/
@Test
public void testNotEquals() {
Path path1 = new Path("path1");
Path path2 = new Path("path2");
FileStatus fileStatus1 = new FileStatus(1, true, 1, 1, 1, 1,
FsPermission.valueOf("-rw-rw-rw-"), "one", "one", null, path1);
FileStatus fileStatus2 = new FileStatus(1, true, 1, 1, 1, 1,
FsPermission.valueOf("-rw-rw-rw-"), "one", "one", null, path2);
assertFalse(fileStatus1.equals(fileStatus2));
assertFalse(fileStatus2.equals(fileStatus1));
}
/**
* Check that toString produces the expected output for a file.
*/
@Test
public void toStringFile() throws IOException {
boolean isdir = false;
FileStatus fileStatus = new FileStatus(LENGTH, isdir, REPLICATION, BLKSIZE,
MTIME, ATIME, PERMISSION, OWNER, GROUP, null, PATH);
validateToString(fileStatus);
}
/**
* Check that toString produces the expected output for a directory.
*/
@Test
public void toStringDir() throws IOException {
FileStatus fileStatus = new FileStatus(LENGTH, true, REPLICATION, BLKSIZE,
MTIME, ATIME, PERMISSION, OWNER, GROUP, null, PATH);
validateToString(fileStatus);
}
@Test
public void testCompareTo() throws IOException {
Path path1 = new Path("path1");
Path path2 = new Path("path2");
FileStatus fileStatus1 =
new FileStatus(1, true, 1, 1, 1, 1, FsPermission.valueOf("-rw-rw-rw-"),
"one", "one", null, path1);
FileStatus fileStatus2 =
new FileStatus(1, true, 1, 1, 1, 1, FsPermission.valueOf("-rw-rw-rw-"),
"one", "one", null, path2);
assertTrue(fileStatus1.compareTo(fileStatus2) < 0);
assertTrue(fileStatus2.compareTo(fileStatus1) > 0);
List<FileStatus> statList = new ArrayList<>();
statList.add(fileStatus1);
statList.add(fileStatus2);
assertTrue(Collections.binarySearch(statList, fileStatus1) > -1);
}
/**
* Check that toString produces the expected output for a symlink.
*/
@Test
public void toStringSymlink() throws IOException {
boolean isdir = false;
Path symlink = new Path("symlink");
FileStatus fileStatus = new FileStatus(LENGTH, isdir, REPLICATION, BLKSIZE,
MTIME, ATIME, PERMISSION, OWNER, GROUP, symlink, PATH);
validateToString(fileStatus);
}
/**
* Validate the accessors for FileStatus.
* @param fileStatus FileStatus to checked
* @param length expected length
* @param isdir expected isDirectory
* @param replication expected replication
* @param blocksize expected blocksize
* @param mtime expected modification time
* @param atime expected access time
* @param permission expected permission
* @param owner expected owner
* @param group expected group
* @param symlink expected symlink
* @param path expected path
*/
private void validateAccessors(FileStatus fileStatus,
long length, boolean isdir, int replication, long blocksize, long mtime,
long atime, FsPermission permission, String owner, String group,
Path symlink, Path path) throws IOException {
assertEquals(length, fileStatus.getLen());
assertEquals(isdir, fileStatus.isDirectory());
assertEquals(replication, fileStatus.getReplication());
assertEquals(blocksize, fileStatus.getBlockSize());
assertEquals(mtime, fileStatus.getModificationTime());
assertEquals(atime, fileStatus.getAccessTime());
assertEquals(permission, fileStatus.getPermission());
assertEquals(owner, fileStatus.getOwner());
assertEquals(group, fileStatus.getGroup());
if(symlink == null) {
assertFalse(fileStatus.isSymlink());
} else {
assertTrue(fileStatus.isSymlink());
assertEquals(symlink, fileStatus.getSymlink());
}
assertEquals(path, fileStatus.getPath());
}
/**
* Validates the toString method for FileStatus.
* @param fileStatus FileStatus to be validated
*/
private void validateToString(FileStatus fileStatus) throws IOException {
StringBuilder expected = new StringBuilder();
expected.append("FileStatus{");
expected.append("path=").append(fileStatus.getPath()).append("; ");
expected.append("isDirectory=").append(fileStatus.isDirectory()).append("; ");
if(!fileStatus.isDirectory()) {
expected.append("length=").append(fileStatus.getLen()).append("; ");
expected.append("replication=").append(fileStatus.getReplication()).append("; ");
expected.append("blocksize=").append(fileStatus.getBlockSize()).append("; ");
}
expected.append("modification_time=").append(fileStatus.getModificationTime()).append("; ");
expected.append("access_time=").append(fileStatus.getAccessTime()).append("; ");
expected.append("owner=").append(fileStatus.getOwner()).append("; ");
expected.append("group=").append(fileStatus.getGroup()).append("; ");
expected.append("permission=").append(fileStatus.getPermission()).append("; ");
if(fileStatus.isSymlink()) {
expected.append("isSymlink=").append(true).append("; ");
expected.append("symlink=").append(fileStatus.getSymlink()).append("}");
} else {
expected.append("isSymlink=").append(false).append("}");
}
assertEquals(expected.toString(), fileStatus.toString());
}
}
| 10,544 | 35.74216 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.*;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestFsShellCopy {
static Configuration conf;
static FsShell shell;
static LocalFileSystem lfs;
static Path testRootDir, srcPath, dstPath;
@BeforeClass
public static void setup() throws Exception {
conf = new Configuration();
shell = new FsShell(conf);
lfs = FileSystem.getLocal(conf);
testRootDir = lfs.makeQualified(new Path(
System.getProperty("test.build.data","test/build/data"),
"testShellCopy"));
lfs.mkdirs(testRootDir);
srcPath = new Path(testRootDir, "srcFile");
dstPath = new Path(testRootDir, "dstFile");
}
@Before
public void prepFiles() throws Exception {
lfs.setVerifyChecksum(true);
lfs.setWriteChecksum(true);
lfs.delete(srcPath, true);
lfs.delete(dstPath, true);
FSDataOutputStream out = lfs.create(srcPath);
out.writeChars("hi");
out.close();
assertTrue(lfs.exists(lfs.getChecksumFile(srcPath)));
}
@Test
public void testCopyNoCrc() throws Exception {
shellRun(0, "-get", srcPath.toString(), dstPath.toString());
checkPath(dstPath, false);
}
@Test
public void testCopyCrc() throws Exception {
shellRun(0, "-get", "-crc", srcPath.toString(), dstPath.toString());
checkPath(dstPath, true);
}
@Test
public void testCorruptedCopyCrc() throws Exception {
FSDataOutputStream out = lfs.getRawFileSystem().create(srcPath);
out.writeChars("bang");
out.close();
shellRun(1, "-get", srcPath.toString(), dstPath.toString());
}
@Test
public void testCorruptedCopyIgnoreCrc() throws Exception {
shellRun(0, "-get", "-ignoreCrc", srcPath.toString(), dstPath.toString());
checkPath(dstPath, false);
}
private void checkPath(Path p, boolean expectChecksum) throws IOException {
assertTrue(lfs.exists(p));
boolean hasChecksum = lfs.exists(lfs.getChecksumFile(p));
assertEquals(expectChecksum, hasChecksum);
}
private void shellRun(int n, String ... args) throws Exception {
assertEquals(n, shell.run(args));
}
@Test
public void testCopyFileFromLocal() throws Exception {
Path testRoot = new Path(testRootDir, "testPutFile");
lfs.delete(testRoot, true);
lfs.mkdirs(testRoot);
Path targetDir = new Path(testRoot, "target");
Path filePath = new Path(testRoot, new Path("srcFile"));
lfs.create(filePath).close();
checkPut(filePath, targetDir, false);
}
@Test
public void testCopyDirFromLocal() throws Exception {
Path testRoot = new Path(testRootDir, "testPutDir");
lfs.delete(testRoot, true);
lfs.mkdirs(testRoot);
Path targetDir = new Path(testRoot, "target");
Path dirPath = new Path(testRoot, new Path("srcDir"));
lfs.mkdirs(dirPath);
lfs.create(new Path(dirPath, "srcFile")).close();
checkPut(dirPath, targetDir, false);
}
@Test
public void testCopyFileFromWindowsLocalPath() throws Exception {
assumeTrue(Path.WINDOWS);
String windowsTestRootPath = (new File(testRootDir.toUri().getPath()
.toString())).getAbsolutePath();
Path testRoot = new Path(windowsTestRootPath, "testPutFile");
lfs.delete(testRoot, true);
lfs.mkdirs(testRoot);
Path targetDir = new Path(testRoot, "target");
Path filePath = new Path(testRoot, new Path("srcFile"));
lfs.create(filePath).close();
checkPut(filePath, targetDir, true);
}
@Test
public void testCopyDirFromWindowsLocalPath() throws Exception {
assumeTrue(Path.WINDOWS);
String windowsTestRootPath = (new File(testRootDir.toUri().getPath()
.toString())).getAbsolutePath();
Path testRoot = new Path(windowsTestRootPath, "testPutDir");
lfs.delete(testRoot, true);
lfs.mkdirs(testRoot);
Path targetDir = new Path(testRoot, "target");
Path dirPath = new Path(testRoot, new Path("srcDir"));
lfs.mkdirs(dirPath);
lfs.create(new Path(dirPath, "srcFile")).close();
checkPut(dirPath, targetDir, true);
}
private void checkPut(Path srcPath, Path targetDir, boolean useWindowsPath)
throws Exception {
lfs.delete(targetDir, true);
lfs.mkdirs(targetDir);
lfs.setWorkingDirectory(targetDir);
final Path dstPath = new Path("path");
final Path childPath = new Path(dstPath, "childPath");
lfs.setWorkingDirectory(targetDir);
// copy to new file, then again
prepPut(dstPath, false, false);
checkPut(0, srcPath, dstPath, useWindowsPath);
if (lfs.isFile(srcPath)) {
checkPut(1, srcPath, dstPath, useWindowsPath);
} else { // directory works because it copies into the dir
// clear contents so the check won't think there are extra paths
prepPut(dstPath, true, true);
checkPut(0, srcPath, dstPath, useWindowsPath);
}
// copy to non-existent dir
prepPut(dstPath, false, false);
checkPut(1, srcPath, childPath, useWindowsPath);
// copy into dir, then with another name
prepPut(dstPath, true, true);
checkPut(0, srcPath, dstPath, useWindowsPath);
prepPut(childPath, true, true);
checkPut(0, srcPath, childPath, useWindowsPath);
// try to put to pwd with existing dir
prepPut(targetDir, true, true);
checkPut(0, srcPath, null, useWindowsPath);
prepPut(targetDir, true, true);
checkPut(0, srcPath, new Path("."), useWindowsPath);
// try to put to pwd with non-existent cwd
prepPut(dstPath, false, true);
lfs.setWorkingDirectory(dstPath);
checkPut(1, srcPath, null, useWindowsPath);
prepPut(dstPath, false, true);
checkPut(1, srcPath, new Path("."), useWindowsPath);
}
private void prepPut(Path dst, boolean create,
boolean isDir) throws IOException {
lfs.delete(dst, true);
assertFalse(lfs.exists(dst));
if (create) {
if (isDir) {
lfs.mkdirs(dst);
assertTrue(lfs.isDirectory(dst));
} else {
lfs.mkdirs(new Path(dst.getName()));
lfs.create(dst).close();
assertTrue(lfs.isFile(dst));
}
}
}
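  // A sketch of the target resolution checkPut() verifies below: putting src
  // into an existing directory d yields d/<srcName>; an existing
  // non-directory or missing dest is the target itself, resolved against
  // the current working directory.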
private void checkPut(int exitCode, Path src, Path dest,
boolean useWindowsPath) throws Exception {
String argv[] = null;
String srcPath = src.toString();
if (useWindowsPath) {
srcPath = (new File(srcPath)).getAbsolutePath();
}
if (dest != null) {
argv = new String[]{ "-put", srcPath, pathAsString(dest) };
} else {
argv = new String[]{ "-put", srcPath };
dest = new Path(Path.CUR_DIR);
}
Path target;
if (lfs.exists(dest)) {
if (lfs.isDirectory(dest)) {
target = new Path(pathAsString(dest), src.getName());
} else {
target = dest;
}
} else {
target = new Path(lfs.getWorkingDirectory(), dest);
}
boolean targetExists = lfs.exists(target);
Path parent = lfs.makeQualified(target).getParent();
System.out.println("COPY src["+src.getName()+"] -> ["+dest+"] as ["+target+"]");
String lsArgv[] = new String[]{ "-ls", "-R", pathAsString(parent) };
shell.run(lsArgv);
int gotExit = shell.run(argv);
System.out.println("copy exit:"+gotExit);
lsArgv = new String[]{ "-ls", "-R", pathAsString(parent) };
shell.run(lsArgv);
if (exitCode == 0) {
assertTrue(lfs.exists(target));
assertTrue(lfs.isFile(src) == lfs.isFile(target));
assertEquals(1, lfs.listStatus(lfs.makeQualified(target).getParent()).length);
} else {
assertEquals(targetExists, lfs.exists(target));
}
assertEquals(exitCode, gotExit);
}
@Test
public void testRepresentsDir() throws Exception {
Path subdirDstPath = new Path(dstPath, srcPath.getName());
String argv[] = null;
lfs.delete(dstPath, true);
assertFalse(lfs.exists(dstPath));
argv = new String[]{ "-put", srcPath.toString(), dstPath.toString() };
assertEquals(0, shell.run(argv));
assertTrue(lfs.exists(dstPath) && lfs.isFile(dstPath));
lfs.delete(dstPath, true);
assertFalse(lfs.exists(dstPath));
// since dst path looks like a dir, it should not copy the file and
// rename it to what looks like a directory
lfs.delete(dstPath, true); // make copy fail
for (String suffix : new String[]{ "/", "/." } ) {
argv = new String[]{
"-put", srcPath.toString(), dstPath.toString()+suffix };
assertEquals(1, shell.run(argv));
assertFalse(lfs.exists(dstPath));
assertFalse(lfs.exists(subdirDstPath));
}
    // when dst path looks like a dir and the dir actually exists, the copy
    // should succeed and place the file inside that directory
for (String suffix : new String[]{ "/", "/." } ) {
// empty out the directory and create to make copy succeed
lfs.delete(dstPath, true);
lfs.mkdirs(dstPath);
argv = new String[]{
"-put", srcPath.toString(), dstPath.toString()+suffix };
assertEquals(0, shell.run(argv));
assertTrue(lfs.exists(subdirDstPath));
assertTrue(lfs.isFile(subdirDstPath));
}
// ensure .. is interpreted as a dir
String dotdotDst = dstPath+"/foo/..";
lfs.delete(dstPath, true);
lfs.mkdirs(new Path(dstPath, "foo"));
argv = new String[]{ "-put", srcPath.toString(), dotdotDst };
assertEquals(0, shell.run(argv));
assertTrue(lfs.exists(subdirDstPath));
assertTrue(lfs.isFile(subdirDstPath));
}
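  // The cases below exercise -getmerge: sources are concatenated into a
  // single local file in argument order, -nl appends a newline after each
  // source, and a directory source contributes only its immediate files.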
@Test
public void testCopyMerge() throws Exception {
Path root = new Path(testRootDir, "TestMerge");
Path f1 = new Path(root, "f1");
Path f2 = new Path(root, "f2");
Path f3 = new Path(root, "f3");
Path fnf = new Path(root, "fnf");
Path d = new Path(root, "dir");
Path df1 = new Path(d, "df1");
Path df2 = new Path(d, "df2");
Path df3 = new Path(d, "df3");
createFile(f1, f2, f3, df1, df2, df3);
int exit;
// one file, kind of silly
exit = shell.run(new String[]{
"-getmerge",
f1.toString(),
"out" });
assertEquals(0, exit);
assertEquals("f1", readFile("out"));
exit = shell.run(new String[]{
"-getmerge",
fnf.toString(),
"out" });
assertEquals(1, exit);
assertFalse(lfs.exists(new Path("out")));
// two files
exit = shell.run(new String[]{
"-getmerge",
f1.toString(), f2.toString(),
"out" });
assertEquals(0, exit);
assertEquals("f1f2", readFile("out"));
// two files, preserves order
exit = shell.run(new String[]{
"-getmerge",
f2.toString(), f1.toString(),
"out" });
assertEquals(0, exit);
assertEquals("f2f1", readFile("out"));
// two files
exit = shell.run(new String[]{
"-getmerge", "-nl",
f1.toString(), f2.toString(),
"out" });
assertEquals(0, exit);
assertEquals("f1\nf2\n", readFile("out"));
// glob three files
    exit = shell.run(new String[]{
"-getmerge", "-nl",
new Path(root, "f*").toString(),
"out" });
assertEquals(0, exit);
assertEquals("f1\nf2\nf3\n", readFile("out"));
// directory with 3 files, should skip subdir
    exit = shell.run(new String[]{
"-getmerge", "-nl",
root.toString(),
"out" });
assertEquals(0, exit);
assertEquals("f1\nf2\nf3\n", readFile("out"));
// subdir
    exit = shell.run(new String[]{
"-getmerge", "-nl",
d.toString(), "out"});
assertEquals(0, exit);
assertEquals("df1\ndf2\ndf3\n", readFile("out"));
// file, dir, file
    exit = shell.run(new String[]{
"-getmerge", "-nl",
f1.toString(), d.toString(), f2.toString(), "out" });
assertEquals(0, exit);
assertEquals("f1\ndf1\ndf2\ndf3\nf2\n", readFile("out"));
}
@Test
public void testMoveFileFromLocal() throws Exception {
Path testRoot = new Path(testRootDir, "testPutFile");
lfs.delete(testRoot, true);
lfs.mkdirs(testRoot);
Path target = new Path(testRoot, "target");
Path srcFile = new Path(testRoot, new Path("srcFile"));
lfs.createNewFile(srcFile);
int exit = shell.run(new String[]{
"-moveFromLocal", srcFile.toString(), target.toString() });
assertEquals(0, exit);
assertFalse(lfs.exists(srcFile));
assertTrue(lfs.exists(target));
assertTrue(lfs.isFile(target));
}
@Test
public void testMoveDirFromLocal() throws Exception {
Path testRoot = new Path(testRootDir, "testPutDir");
lfs.delete(testRoot, true);
lfs.mkdirs(testRoot);
Path srcDir = new Path(testRoot, "srcDir");
lfs.mkdirs(srcDir);
Path targetDir = new Path(testRoot, "target");
int exit = shell.run(new String[]{
"-moveFromLocal", srcDir.toString(), targetDir.toString() });
assertEquals(0, exit);
assertFalse(lfs.exists(srcDir));
assertTrue(lfs.exists(targetDir));
}
@Test
public void testMoveDirFromLocalDestExists() throws Exception {
Path testRoot = new Path(testRootDir, "testPutDir");
lfs.delete(testRoot, true);
lfs.mkdirs(testRoot);
Path srcDir = new Path(testRoot, "srcDir");
lfs.mkdirs(srcDir);
Path targetDir = new Path(testRoot, "target");
lfs.mkdirs(targetDir);
int exit = shell.run(new String[]{
"-moveFromLocal", srcDir.toString(), targetDir.toString() });
assertEquals(0, exit);
assertFalse(lfs.exists(srcDir));
assertTrue(lfs.exists(new Path(targetDir, srcDir.getName())));
lfs.mkdirs(srcDir);
exit = shell.run(new String[]{
"-moveFromLocal", srcDir.toString(), targetDir.toString() });
assertEquals(1, exit);
assertTrue(lfs.exists(srcDir));
}
@Test
public void testMoveFromWindowsLocalPath() throws Exception {
assumeTrue(Path.WINDOWS);
Path testRoot = new Path(testRootDir, "testPutFile");
lfs.delete(testRoot, true);
lfs.mkdirs(testRoot);
Path target = new Path(testRoot, "target");
Path srcFile = new Path(testRoot, new Path("srcFile"));
lfs.createNewFile(srcFile);
String winSrcFile = (new File(srcFile.toUri().getPath()
.toString())).getAbsolutePath();
shellRun(0, "-moveFromLocal", winSrcFile, target.toString());
assertFalse(lfs.exists(srcFile));
assertTrue(lfs.exists(target));
assertTrue(lfs.isFile(target));
}
@Test
public void testGetWindowsLocalPath() throws Exception {
assumeTrue(Path.WINDOWS);
String winDstFile = (new File(dstPath.toUri().getPath()
.toString())).getAbsolutePath();
shellRun(0, "-get", srcPath.toString(), winDstFile);
checkPath(dstPath, false);
}
private void createFile(Path ... paths) throws IOException {
for (Path path : paths) {
FSDataOutputStream out = lfs.create(path);
out.write(path.getName().getBytes());
out.close();
}
}
private String readFile(String out) throws IOException {
Path path = new Path(out);
FileStatus stat = lfs.getFileStatus(path);
FSDataInputStream in = lfs.open(path);
byte[] buffer = new byte[(int)stat.getLen()];
in.readFully(buffer);
in.close();
lfs.delete(path, false);
return new String(buffer);
}
// path handles "." rather oddly
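  // e.g. new Path(".") can normalize to an empty string (an observed quirk
  // this helper guards against)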
private String pathAsString(Path p) {
String s = (p == null) ? Path.CUR_DIR : p.toString();
return s.isEmpty() ? Path.CUR_DIR : s;
}
}
| 16315 | 30.805068 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobPattern.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.util.regex.PatternSyntaxException;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* Tests for glob patterns
*/
public class TestGlobPattern {
private void assertMatch(boolean yes, String glob, String...input) {
GlobPattern pattern = new GlobPattern(glob);
for (String s : input) {
boolean result = pattern.matches(s);
assertTrue(glob +" should"+ (yes ? "" : " not") +" match "+ s,
yes ? result : !result);
}
}
private void shouldThrow(String... globs) {
for (String glob : globs) {
try {
GlobPattern.compile(glob);
}
catch (PatternSyntaxException e) {
e.printStackTrace();
continue;
}
assertTrue("glob "+ glob +" should throw", false);
}
}
@Test public void testValidPatterns() {
assertMatch(true, "*", "^$", "foo", "bar");
assertMatch(true, "?", "?", "^", "[", "]", "$");
assertMatch(true, "foo*", "foo", "food", "fool");
assertMatch(true, "f*d", "fud", "food");
assertMatch(true, "*d", "good", "bad");
assertMatch(true, "\\*\\?\\[\\{\\\\", "*?[{\\");
assertMatch(true, "[]^-]", "]", "-", "^");
assertMatch(true, "]", "]");
assertMatch(true, "^.$()|+", "^.$()|+");
assertMatch(true, "[^^]", ".", "$", "[", "]");
assertMatch(false, "[^^]", "^");
assertMatch(true, "[!!-]", "^", "?");
assertMatch(false, "[!!-]", "!", "-");
assertMatch(true, "{[12]*,[45]*,[78]*}", "1", "2!", "4", "42", "7", "7$");
assertMatch(false, "{[12]*,[45]*,[78]*}", "3", "6", "9ß");
assertMatch(true, "}", "}");
}
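  // A minimal usage sketch (illustrative; it restates behavior already
  // asserted above rather than adding new coverage):
  @Test public void testUsageSketch() {
    assertTrue(new GlobPattern("f*d").matches("food"));
    assertFalse(new GlobPattern("f*d").matches("foo"));
  }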
@Test public void testInvalidPatterns() {
shouldThrow("[", "[[]]", "[][]", "{", "\\");
}
}
| 2538 | 32.853333 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.fs.Options.CreateOpts;
/**
* Abstraction of filesystem functionality with additional helper methods
* commonly used in tests. This allows generic tests to be written which apply
* to the two filesystem abstractions in Hadoop: {@link FileSystem} and
* {@link FileContext}.
*/
public abstract class FSTestWrapper implements FSWrapper {
//
// Test helper methods taken from FileContextTestHelper
//
protected static final int DEFAULT_BLOCK_SIZE = 1024;
protected static final int DEFAULT_NUM_BLOCKS = 2;
protected String testRootDir = null;
protected String absTestRootDir = null;
public FSTestWrapper(String testRootDir) {
// Use default test dir if not provided
if (testRootDir == null || testRootDir.isEmpty()) {
testRootDir = System.getProperty("test.build.data", "build/test/data");
}
// salt test dir with some random digits for safe parallel runs
this.testRootDir = testRootDir + "/"
+ RandomStringUtils.randomAlphanumeric(10);
}
public static byte[] getFileData(int numOfBlocks, long blockSize) {
byte[] data = new byte[(int) (numOfBlocks * blockSize)];
for (int i = 0; i < data.length; i++) {
data[i] = (byte) (i % 10);
}
return data;
}
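  // Illustrative round trip (a sketch; `wrapper` and `path` are
  // placeholders, and writeFile/readFile are the abstract methods declared
  // below):
  //   byte[] data = getFileData(DEFAULT_NUM_BLOCKS, DEFAULT_BLOCK_SIZE);
  //   wrapper.writeFile(path, data);
  //   byte[] back = wrapper.readFile(path, data.length);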
public Path getTestRootPath() {
return makeQualified(new Path(testRootDir));
}
public Path getTestRootPath(String pathString) {
return makeQualified(new Path(testRootDir, pathString));
}
  // the getAbsoluteXxx methods are needed because the root test dir
  // can be messed up by changing the working dir.
public String getAbsoluteTestRootDir() throws IOException {
if (absTestRootDir == null) {
Path testRootPath = new Path(testRootDir);
if (testRootPath.isAbsolute()) {
absTestRootDir = testRootDir;
} else {
absTestRootDir = getWorkingDirectory().toString() + "/"
+ testRootDir;
}
}
return absTestRootDir;
}
public Path getAbsoluteTestRootPath() throws IOException {
return makeQualified(new Path(getAbsoluteTestRootDir()));
}
abstract public FSTestWrapper getLocalFSWrapper()
throws UnsupportedFileSystemException, IOException;
abstract public Path getDefaultWorkingDirectory() throws IOException;
/*
* Create files with numBlocks blocks each with block size blockSize.
*/
abstract public long createFile(Path path, int numBlocks,
CreateOpts... options) throws IOException;
abstract public long createFile(Path path, int numBlocks, int blockSize)
throws IOException;
abstract public long createFile(Path path) throws IOException;
abstract public long createFile(String name) throws IOException;
abstract public long createFileNonRecursive(String name) throws IOException;
abstract public long createFileNonRecursive(Path path) throws IOException;
abstract public void appendToFile(Path path, int numBlocks,
CreateOpts... options) throws IOException;
abstract public boolean exists(Path p) throws IOException;
abstract public boolean isFile(Path p) throws IOException;
abstract public boolean isDir(Path p) throws IOException;
abstract public boolean isSymlink(Path p) throws IOException;
abstract public void writeFile(Path path, byte b[]) throws IOException;
abstract public byte[] readFile(Path path, int len) throws IOException;
abstract public FileStatus containsPath(Path path, FileStatus[] dirList)
throws IOException;
abstract public FileStatus containsPath(String path, FileStatus[] dirList)
throws IOException;
enum fileType {
isDir, isFile, isSymlink
};
abstract public void checkFileStatus(String path, fileType expectedType)
throws IOException;
abstract public void checkFileLinkStatus(String path, fileType expectedType)
throws IOException;
}
| 4752 | 32.471831 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTruncatedInputBug.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.DataOutputStream;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
/**
 * Test for the input truncation bug when mark/reset is used.
* HADOOP-1489
*/
public class TestTruncatedInputBug extends TestCase {
private static String TEST_ROOT_DIR =
new Path(System.getProperty("test.build.data","/tmp"))
.toString().replace(' ', '+');
private void writeFile(FileSystem fileSys,
Path name, int nBytesToWrite)
throws IOException {
DataOutputStream out = fileSys.create(name);
for (int i = 0; i < nBytesToWrite; ++i) {
out.writeByte(0);
}
out.close();
}
/**
* When mark() is used on BufferedInputStream, the request
* size on the checksum file system can be small. However,
* checksum file system currently depends on the request size
* >= bytesPerSum to work properly.
*/
public void testTruncatedInputBug() throws IOException {
final int ioBufSize = 512;
final int fileSize = ioBufSize*4;
int filePos = 0;
Configuration conf = new Configuration();
conf.setInt("io.file.buffer.size", ioBufSize);
FileSystem fileSys = FileSystem.getLocal(conf);
try {
// First create a test input file.
Path testFile = new Path(TEST_ROOT_DIR, "HADOOP-1489");
writeFile(fileSys, testFile, fileSize);
assertTrue(fileSys.exists(testFile));
assertTrue(fileSys.getFileStatus(testFile).getLen() == fileSize);
// Now read the file for ioBufSize bytes
FSDataInputStream in = fileSys.open(testFile, ioBufSize);
// seek beyond data buffered by open
filePos += ioBufSize * 2 + (ioBufSize - 10);
in.seek(filePos);
// read 4 more bytes before marking
for (int i = 0; i < 4; ++i) {
if (in.read() == -1) {
break;
}
++filePos;
}
// Now set mark() to trigger the bug
// NOTE: in the fixed code, mark() does nothing (not supported) and
// hence won't trigger this bug.
in.mark(1);
System.out.println("MARKED");
// Try to read the rest
while (filePos < fileSize) {
if (in.read() == -1) {
break;
}
++filePos;
}
in.close();
System.out.println("Read " + filePos + " bytes."
+ " file size=" + fileSize);
assertTrue(filePos == fileSize);
} finally {
try {
fileSys.close();
} catch (Exception e) {
// noop
}
}
} // end testTruncatedInputBug
}
| 3429 | 30.181818 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestChecksumFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataOutputStream;
import static org.apache.hadoop.fs.FileSystemTestHelper.*;
import org.apache.hadoop.conf.Configuration;
import org.junit.*;
import static org.junit.Assert.*;
public class TestChecksumFileSystem {
static final String TEST_ROOT_DIR
= System.getProperty("test.build.data","build/test/data/work-dir/localfs");
static LocalFileSystem localFs;
@Before
public void resetLocalFs() throws Exception {
localFs = FileSystem.getLocal(new Configuration());
localFs.setVerifyChecksum(true);
}
@Test
public void testgetChecksumLength() throws Exception {
assertEquals(8, ChecksumFileSystem.getChecksumLength(0L, 512));
assertEquals(12, ChecksumFileSystem.getChecksumLength(1L, 512));
assertEquals(12, ChecksumFileSystem.getChecksumLength(512L, 512));
assertEquals(16, ChecksumFileSystem.getChecksumLength(513L, 512));
assertEquals(16, ChecksumFileSystem.getChecksumLength(1023L, 512));
assertEquals(16, ChecksumFileSystem.getChecksumLength(1024L, 512));
assertEquals(408, ChecksumFileSystem.getChecksumLength(100L, 1));
assertEquals(4000000000008L,
ChecksumFileSystem.getChecksumLength(10000000000000L, 10));
}
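  // A sketch of the relationship the assertions above exercise (inferred
  // from the expected values, not necessarily the implementation): an
  // 8-byte header plus one 4-byte CRC per data chunk.
  private static long expectedChecksumLength(long size, int bytesPerSum) {
    return 8L + 4L * ((size + bytesPerSum - 1) / bytesPerSum);
  }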
@Test
public void testVerifyChecksum() throws Exception {
Path testPath = new Path(TEST_ROOT_DIR, "testPath");
Path testPath11 = new Path(TEST_ROOT_DIR, "testPath11");
FSDataOutputStream fout = localFs.create(testPath);
fout.write("testing".getBytes());
fout.close();
fout = localFs.create(testPath11);
fout.write("testing you".getBytes());
fout.close();
// Exercise some boundary cases - a divisor of the chunk size
// the chunk size, 2x chunk size, and +/-1 around these.
readFile(localFs, testPath, 128);
readFile(localFs, testPath, 511);
readFile(localFs, testPath, 512);
readFile(localFs, testPath, 513);
readFile(localFs, testPath, 1023);
readFile(localFs, testPath, 1024);
readFile(localFs, testPath, 1025);
localFs.delete(localFs.getChecksumFile(testPath), true);
assertTrue("checksum deleted", !localFs.exists(localFs.getChecksumFile(testPath)));
//copying the wrong checksum file
FileUtil.copy(localFs, localFs.getChecksumFile(testPath11), localFs,
localFs.getChecksumFile(testPath),false,true,localFs.getConf());
assertTrue("checksum exists", localFs.exists(localFs.getChecksumFile(testPath)));
boolean errorRead = false;
try {
readFile(localFs, testPath, 1024);
}catch(ChecksumException ie) {
errorRead = true;
}
assertTrue("error reading", errorRead);
//now setting verify false, the read should succeed
localFs.setVerifyChecksum(false);
String str = readFile(localFs, testPath, 1024).toString();
assertTrue("read", "testing".equals(str));
}
@Test
public void testMultiChunkFile() throws Exception {
Path testPath = new Path(TEST_ROOT_DIR, "testMultiChunk");
FSDataOutputStream fout = localFs.create(testPath);
for (int i = 0; i < 1000; i++) {
fout.write(("testing" + i).getBytes());
}
fout.close();
// Exercise some boundary cases - a divisor of the chunk size
// the chunk size, 2x chunk size, and +/-1 around these.
readFile(localFs, testPath, 128);
readFile(localFs, testPath, 511);
readFile(localFs, testPath, 512);
readFile(localFs, testPath, 513);
readFile(localFs, testPath, 1023);
readFile(localFs, testPath, 1024);
readFile(localFs, testPath, 1025);
}
/**
* Test to ensure that if the checksum file is truncated, a
* ChecksumException is thrown
*/
@Test
public void testTruncatedChecksum() throws Exception {
Path testPath = new Path(TEST_ROOT_DIR, "testtruncatedcrc");
FSDataOutputStream fout = localFs.create(testPath);
fout.write("testing truncation".getBytes());
fout.close();
// Read in the checksum
Path checksumFile = localFs.getChecksumFile(testPath);
FileSystem rawFs = localFs.getRawFileSystem();
FSDataInputStream checksumStream = rawFs.open(checksumFile);
byte buf[] = new byte[8192];
int read = checksumStream.read(buf, 0, buf.length);
checksumStream.close();
// Now rewrite the checksum file with the last byte missing
FSDataOutputStream replaceStream = rawFs.create(checksumFile);
replaceStream.write(buf, 0, read - 1);
replaceStream.close();
// Now reading the file should fail with a ChecksumException
try {
readFile(localFs, testPath, 1024);
fail("Did not throw a ChecksumException when reading truncated " +
"crc file");
} catch(ChecksumException ie) {
}
// telling it not to verify checksums, should avoid issue.
localFs.setVerifyChecksum(false);
String str = readFile(localFs, testPath, 1024).toString();
assertTrue("read", "testing truncation".equals(str));
}
@Test
public void testStreamType() throws Exception {
Path testPath = new Path(TEST_ROOT_DIR, "testStreamType");
localFs.create(testPath).close();
FSDataInputStream in = null;
localFs.setVerifyChecksum(true);
in = localFs.open(testPath);
assertTrue("stream is input checker",
in.getWrappedStream() instanceof FSInputChecker);
localFs.setVerifyChecksum(false);
in = localFs.open(testPath);
assertFalse("stream is not input checker",
in.getWrappedStream() instanceof FSInputChecker);
}
@Test
public void testCorruptedChecksum() throws Exception {
Path testPath = new Path(TEST_ROOT_DIR, "testCorruptChecksum");
Path checksumPath = localFs.getChecksumFile(testPath);
// write a file to generate checksum
FSDataOutputStream out = localFs.create(testPath, true);
out.write("testing 1 2 3".getBytes());
out.close();
assertTrue(localFs.exists(checksumPath));
FileStatus stat = localFs.getFileStatus(checksumPath);
// alter file directly so checksum is invalid
out = localFs.getRawFileSystem().create(testPath, true);
out.write("testing stale checksum".getBytes());
out.close();
assertTrue(localFs.exists(checksumPath));
// checksum didn't change on disk
assertEquals(stat, localFs.getFileStatus(checksumPath));
Exception e = null;
try {
localFs.setVerifyChecksum(true);
readFile(localFs, testPath, 1024);
} catch (ChecksumException ce) {
e = ce;
} finally {
assertNotNull("got checksum error", e);
}
localFs.setVerifyChecksum(false);
String str = readFile(localFs, testPath, 1024);
assertEquals("testing stale checksum", str);
}
@Test
public void testRenameFileToFile() throws Exception {
Path srcPath = new Path(TEST_ROOT_DIR, "testRenameSrc");
Path dstPath = new Path(TEST_ROOT_DIR, "testRenameDst");
verifyRename(srcPath, dstPath, false);
}
@Test
public void testRenameFileIntoDir() throws Exception {
Path srcPath = new Path(TEST_ROOT_DIR, "testRenameSrc");
Path dstPath = new Path(TEST_ROOT_DIR, "testRenameDir");
localFs.mkdirs(dstPath);
verifyRename(srcPath, dstPath, true);
}
@Test
public void testRenameFileIntoDirFile() throws Exception {
Path srcPath = new Path(TEST_ROOT_DIR, "testRenameSrc");
Path dstPath = new Path(TEST_ROOT_DIR, "testRenameDir/testRenameDst");
assertTrue(localFs.mkdirs(dstPath));
verifyRename(srcPath, dstPath, false);
}
void verifyRename(Path srcPath, Path dstPath, boolean dstIsDir)
throws Exception {
localFs.delete(srcPath,true);
localFs.delete(dstPath,true);
Path realDstPath = dstPath;
if (dstIsDir) {
localFs.mkdirs(dstPath);
realDstPath = new Path(dstPath, srcPath.getName());
}
// ensure file + checksum are moved
writeFile(localFs, srcPath, 1);
assertTrue(localFs.exists(localFs.getChecksumFile(srcPath)));
assertTrue(localFs.rename(srcPath, dstPath));
assertTrue(localFs.exists(localFs.getChecksumFile(realDstPath)));
// create a file with no checksum, rename, ensure dst checksum is removed
writeFile(localFs.getRawFileSystem(), srcPath, 1);
assertFalse(localFs.exists(localFs.getChecksumFile(srcPath)));
assertTrue(localFs.rename(srcPath, dstPath));
assertFalse(localFs.exists(localFs.getChecksumFile(realDstPath)));
// create file with checksum, rename over prior dst with no checksum
writeFile(localFs, srcPath, 1);
assertTrue(localFs.exists(localFs.getChecksumFile(srcPath)));
assertTrue(localFs.rename(srcPath, dstPath));
assertTrue(localFs.exists(localFs.getChecksumFile(realDstPath)));
}
}
| 9538 | 35.547893 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFcLocalFsUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.junit.Before;
/**
* Test Util for localFs using FileContext API.
*/
public class TestFcLocalFsUtil extends
FileContextUtilBase {
@Override
@Before
public void setUp() throws Exception {
fc = FileContext.getLocalFSFileContext();
super.setUp();
}
}
| 1118 | 30.971429 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.*;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Shell;
import java.io.*;
import java.util.*;
import junit.framework.*;
/**
* This class tests the local file system via the FileSystem abstraction.
*/
public class TestLocalFileSystemPermission extends TestCase {
static final String TEST_PATH_PREFIX = new Path(System.getProperty(
"test.build.data", "/tmp")).toString().replace(' ', '_')
+ "/" + TestLocalFileSystemPermission.class.getSimpleName() + "_";
{
try {
((org.apache.commons.logging.impl.Log4JLogger)FileSystem.LOG).getLogger()
.setLevel(org.apache.log4j.Level.DEBUG);
}
catch(Exception e) {
System.out.println("Cannot change log level\n"
+ StringUtils.stringifyException(e));
}
}
private Path writeFile(FileSystem fs, String name) throws IOException {
Path f = new Path(TEST_PATH_PREFIX + name);
FSDataOutputStream stm = fs.create(f);
stm.writeBytes("42\n");
stm.close();
return f;
}
private void cleanupFile(FileSystem fs, Path name) throws IOException {
assertTrue(fs.exists(name));
fs.delete(name, true);
assertTrue(!fs.exists(name));
}
/** Test LocalFileSystem.setPermission */
public void testLocalFSsetPermission() throws IOException {
if (Path.WINDOWS) {
System.out.println("Cannot run test for Windows");
return;
}
Configuration conf = new Configuration();
LocalFileSystem localfs = FileSystem.getLocal(conf);
String filename = "foo";
Path f = writeFile(localfs, filename);
try {
FsPermission initialPermission = getPermission(localfs, f);
System.out.println(filename + ": " + initialPermission);
assertEquals(FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)), initialPermission);
}
catch(Exception e) {
System.out.println(StringUtils.stringifyException(e));
System.out.println("Cannot run test");
return;
}
try {
// create files and manipulate them.
FsPermission all = new FsPermission((short)0777);
FsPermission none = new FsPermission((short)0);
localfs.setPermission(f, none);
assertEquals(none, getPermission(localfs, f));
localfs.setPermission(f, all);
assertEquals(all, getPermission(localfs, f));
}
finally {cleanupFile(localfs, f);}
}
FsPermission getPermission(LocalFileSystem fs, Path p) throws IOException {
return fs.getFileStatus(p).getPermission();
}
/** Test LocalFileSystem.setOwner */
public void testLocalFSsetOwner() throws IOException {
if (Path.WINDOWS) {
System.out.println("Cannot run test for Windows");
return;
}
Configuration conf = new Configuration();
LocalFileSystem localfs = FileSystem.getLocal(conf);
String filename = "bar";
Path f = writeFile(localfs, filename);
List<String> groups = null;
try {
groups = getGroups();
System.out.println(filename + ": " + getPermission(localfs, f));
}
catch(IOException e) {
System.out.println(StringUtils.stringifyException(e));
System.out.println("Cannot run test");
return;
}
if (groups == null || groups.size() < 1) {
System.out.println("Cannot run test: need at least one group. groups="
+ groups);
return;
}
// create files and manipulate them.
try {
String g0 = groups.get(0);
localfs.setOwner(f, null, g0);
assertEquals(g0, getGroup(localfs, f));
if (groups.size() > 1) {
String g1 = groups.get(1);
localfs.setOwner(f, null, g1);
assertEquals(g1, getGroup(localfs, f));
} else {
System.out.println("Not testing changing the group since user " +
"belongs to only one group.");
}
}
finally {cleanupFile(localfs, f);}
}
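  // Illustrative note (an assumption about typical platforms): Shell
  // .getGroupsCommand() resolves to something like the POSIX "groups"
  // command, whose whitespace-separated output, e.g. "staff wheel",
  // getGroups() below tokenizes into ["staff", "wheel"].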
static List<String> getGroups() throws IOException {
List<String> a = new ArrayList<String>();
String s = Shell.execCommand(Shell.getGroupsCommand());
for(StringTokenizer t = new StringTokenizer(s); t.hasMoreTokens(); ) {
a.add(t.nextToken());
}
return a;
}
String getGroup(LocalFileSystem fs, Path p) throws IOException {
return fs.getFileStatus(p).getGroup();
}
}
| 5211 | 31.575 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.loadGenerator;
import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Preconditions;
/** The load generator is a tool for testing NameNode behavior under
 * different client loads. Note there is a subclass of this class that lets
 * you run the load generator as a MapReduce job (see LoadGeneratorMR in the
 * MapReduce project).
*
* The loadGenerator allows the user to generate different mixes of read, write,
* and list requests by specifying the probabilities of read and
* write. The user controls the intensity of the load by
* adjusting parameters for the number of worker threads and the delay
* between operations. While load generators are running, the user
* can profile and monitor the running of the NameNode. When a load
 * generator exits, it prints some NameNode statistics like the average
 * execution time of each kind of operation and the NameNode
* throughput.
*
 * The program can run in one of two forms: as a regular single-process
 * command that runs multiple threads to generate load on the NN, or as a
 * MapReduce program that runs multiple (multi-threaded) map tasks that
 * generate load on the NN; the results summary is generated by a single
 * reduce task.
*
*
* The user may either specify constant duration, read and write
* probabilities via the command line, or may specify a text file
* that acts as a script of which read and write probabilities to
* use for specified durations. If no duration is specified the program
* runs till killed (duration required if run as MapReduce).
*
* The script takes the form of lines of duration in seconds, read
* probability and write probability, each separated by white space.
 * Blank lines and lines starting with # (comments) are ignored. If the load
 * generator is run as a MapReduce program then the script file needs to be
 * accessible to the Map task as an HDFS file.
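 *
 * An illustrative script (made-up numbers, matching the format above):
 *   # duration(s) readProb writeProb
 *   60 0.5 0.3
 *   120 0.2 0.2
 * This runs one minute at 50% reads and 30% writes (the remaining 20%
 * are lists), then two minutes at 20% reads and 20% writes.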
*
* After command line argument parsing and data initialization,
* the load generator spawns the number of worker threads
* as specified by the user.
* Each thread sends a stream of requests to the NameNode.
* For each iteration, it first decides if it is going to read a file,
* create a file, or listing a directory following the read and write
* probabilities specified by the user.
* When reading, it randomly picks a file in the test space and reads
* the entire file. When writing, it randomly picks a directory in the
* test space and creates a file whose name consists of the current
* machine's host name and the thread id. The length of the file
* follows Gaussian distribution with an average size of 2 blocks and
* the standard deviation of 1 block. The new file is filled with 'a'.
* Immediately after the file creation completes, the file is deleted
* from the test space.
* While listing, it randomly picks a directory in the test space and
 * lists the directory content.
* Between two consecutive operations, the thread pauses for a random
* amount of time in the range of [0, maxDelayBetweenOps]
* if the specified max delay is not zero.
* All threads are stopped when the specified elapsed time has passed
* in command-line execution, or all the lines of script have been
* executed, if using a script.
* Before exiting, the program prints the average execution for
* each kind of NameNode operations, and the number of requests
* served by the NameNode.
*
* The synopsis of the command is
* java LoadGenerator
* -readProbability <read probability>: read probability [0, 1]
* with a default value of 0.3333.
* -writeProbability <write probability>: write probability [0, 1]
* with a default value of 0.3333.
* -root <root>: test space with a default value of /testLoadSpace
* -maxDelayBetweenOps <maxDelayBetweenOpsInMillis>:
* Max delay in the unit of milliseconds between two operations with a
* default value of 0 indicating no delay.
* -numOfThreads <numOfThreads>:
* number of threads to spawn with a default value of 200.
* -elapsedTime <elapsedTimeInSecs>:
* the elapsed time of program with a default value of 0
* indicating running forever
* -startTime <startTimeInMillis> : when the threads start to run.
* -scriptFile <file name>: text file to parse for scripted operation
*/
public class LoadGenerator extends Configured implements Tool {
public static final Log LOG = LogFactory.getLog(LoadGenerator.class);
private volatile static boolean shouldRun = true;
protected static Path root = DataGenerator.DEFAULT_ROOT;
private static FileContext fc;
protected static int maxDelayBetweenOps = 0;
protected static int numOfThreads = 200;
protected static long [] durations = {0};
protected static double [] readProbs = {0.3333};
protected static double [] writeProbs = {0.3333};
private static volatile int currentIndex = 0;
protected static long totalTime = 0;
protected static long startTime = Time.now()+10000;
final static private int BLOCK_SIZE = 10;
private static ArrayList<String> files = new ArrayList<String>(); // a table of file names
private static ArrayList<String> dirs = new ArrayList<String>(); // a table of directory names
protected static Random r = null;
protected static long seed = 0;
protected static String scriptFile = null;
protected static final String FLAGFILE_DEFAULT = "/tmp/flagFile";
protected static Path flagFile = new Path(FLAGFILE_DEFAULT);
protected String hostname;
final private static String USAGE_CMD = "java LoadGenerator\n";
final protected static String USAGE_ARGS =
"-readProbability <read probability>\n" +
"-writeProbability <write probability>\n" +
"-root <root>\n" +
"-maxDelayBetweenOps <maxDelayBetweenOpsInMillis>\n" +
"-numOfThreads <numOfThreads>\n" +
"-elapsedTime <elapsedTimeInSecs>\n" +
"-startTime <startTimeInMillis>\n" +
"-scriptFile <filename>\n" +
"-flagFile <filename>";
final private static String USAGE = USAGE_CMD + USAGE_ARGS;
private final byte[] WRITE_CONTENTS = new byte[4096];
private static final int ERR_TEST_FAILED = 2;
/** Constructor */
public LoadGenerator() throws IOException, UnknownHostException {
InetAddress addr = InetAddress.getLocalHost();
hostname = addr.getHostName();
Arrays.fill(WRITE_CONTENTS, (byte) 'a');
}
public LoadGenerator(Configuration conf) throws IOException, UnknownHostException {
this();
setConf(conf);
}
protected final static int OPEN = 0;
protected final static int LIST = 1;
protected final static int CREATE = 2;
protected final static int WRITE_CLOSE = 3;
protected final static int DELETE = 4;
  protected final static int TOTAL_OP_TYPES = 5;
protected static long [] executionTime = new long[TOTAL_OP_TYPES];
protected static long [] numOfOps = new long[TOTAL_OP_TYPES];
  protected static long totalOps = 0; // across all op types
/** A thread sends a stream of requests to the NameNode.
* At each iteration, it first decides if it is going to read a file,
   * create a file, or list a directory following the read
* and write probabilities.
* When reading, it randomly picks a file in the test space and reads
* the entire file. When writing, it randomly picks a directory in the
* test space and creates a file whose name consists of the current
* machine's host name and the thread id. The length of the file
* follows Gaussian distribution with an average size of 2 blocks and
* the standard deviation of 1 block. The new file is filled with 'a'.
* Immediately after the file creation completes, the file is deleted
* from the test space.
* While listing, it randomly picks a directory in the test space and
   * lists the directory content.
* Between two consecutive operations, the thread pauses for a random
* amount of time in the range of [0, maxDelayBetweenOps]
* if the specified max delay is not zero.
* A thread runs for the specified elapsed time if the time isn't zero.
* Otherwise, it runs forever.
*/
private class DFSClientThread extends Thread {
private int id;
private long [] executionTime = new long[TOTAL_OP_TYPES];
private long [] totalNumOfOps = new long[TOTAL_OP_TYPES];
private byte[] buffer = new byte[1024];
private boolean failed;
private DFSClientThread(int id) {
this.id = id;
}
/** Main loop for each thread
* Each iteration decides what's the next operation and then pauses.
*/
@Override
public void run() {
try {
while (shouldRun) {
nextOp();
delay();
}
} catch (Exception ioe) {
System.err.println(ioe.getLocalizedMessage());
ioe.printStackTrace();
failed = true;
}
}
/** Let the thread pause for a random amount of time in the range of
* [0, maxDelayBetweenOps] if the delay is not zero. Otherwise, no pause.
*/
private void delay() throws InterruptedException {
if (maxDelayBetweenOps>0) {
int delay = r.nextInt(maxDelayBetweenOps);
Thread.sleep(delay);
}
}
/** Perform the next operation.
*
* Depending on the read and write probabilities, the next
* operation could be either read, write, or list.
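     *
     * For example (illustrative), with readProbs[i] = 0.4 and
     * writeProbs[i] = 0.4, a uniform draw rn = 0.7 falls in [0.4, 0.8)
     * and selects a write; rn >= 0.8 would select a list.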
*/
private void nextOp() throws IOException {
double rn = r.nextDouble();
int i = currentIndex;
if(LOG.isDebugEnabled())
LOG.debug("Thread " + this.id + " moving to index " + i);
if (rn < readProbs[i]) {
read();
} else if (rn < readProbs[i] + writeProbs[i]) {
write();
} else {
list();
}
}
/** Read operation randomly picks a file in the test space and reads
* the entire file */
private void read() throws IOException {
String fileName = files.get(r.nextInt(files.size()));
long startTime = Time.now();
InputStream in = fc.open(new Path(fileName));
executionTime[OPEN] += (Time.now()-startTime);
totalNumOfOps[OPEN]++;
while (in.read(buffer) != -1) {}
in.close();
}
/** The write operation randomly picks a directory in the
* test space and creates a file whose name consists of the current
* machine's host name and the thread id. The length of the file
* follows Gaussian distribution with an average size of 2 blocks and
* the standard deviation of 1 block. The new file is filled with 'a'.
* Immediately after the file creation completes, the file is deleted
* from the test space.
*/
private void write() throws IOException {
String dirName = dirs.get(r.nextInt(dirs.size()));
Path file = new Path(dirName, hostname+id);
double fileSize = 0;
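      // r.nextGaussian() is N(0, 1), so fileSize is drawn from a Gaussian
      // with mean 2 blocks and stddev 1 block (per the class javadoc),
      // resampled until positive.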
while ((fileSize = r.nextGaussian()+2)<=0) {}
genFile(file, (long)(fileSize*BLOCK_SIZE));
long startTime = Time.now();
fc.delete(file, true);
executionTime[DELETE] += (Time.now()-startTime);
totalNumOfOps[DELETE]++;
}
/** The list operation randomly picks a directory in the test space and
     * lists the directory content.
*/
private void list() throws IOException {
String dirName = dirs.get(r.nextInt(dirs.size()));
long startTime = Time.now();
fc.listStatus(new Path(dirName));
executionTime[LIST] += (Time.now()-startTime);
totalNumOfOps[LIST]++;
}
/** Create a file with a length of <code>fileSize</code>.
* The file is filled with 'a'.
*/
private void genFile(Path file, long fileSize) throws IOException {
long startTime = Time.now();
FSDataOutputStream out = null;
try {
out = fc.create(file,
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
CreateOpts.createParent(), CreateOpts.bufferSize(4096),
CreateOpts.repFac((short) 3));
executionTime[CREATE] += (Time.now() - startTime);
numOfOps[CREATE]++;
long i = fileSize;
while (i > 0) {
          long s = Math.min(i, WRITE_CONTENTS.length); // cap at remaining bytes
out.write(WRITE_CONTENTS, 0, (int) s);
i -= s;
}
        startTime = Time.now();
        out.close(); // time the close; the cleanup in finally is then a no-op
        executionTime[WRITE_CLOSE] += (Time.now() - startTime);
numOfOps[WRITE_CLOSE]++;
} finally {
IOUtils.cleanup(LOG, out);
}
}
}
/** Main function called by tool runner.
* It first initializes data by parsing the command line arguments.
   * It then runs the load generator.
*/
@Override
public int run(String[] args) throws Exception {
int exitCode = parseArgs(false, args);
if (exitCode != 0) {
return exitCode;
}
System.out.println("Running LoadGenerator against fileSystem: " +
FileContext.getFileContext().getDefaultFileSystem().getUri());
exitCode = generateLoadOnNN();
printResults(System.out);
return exitCode;
}
boolean stopFileCreated() {
try {
fc.getFileStatus(flagFile);
} catch (FileNotFoundException e) {
return false;
} catch (IOException e) {
LOG.error("Got error when checking if file exists:" + flagFile, e);
}
LOG.info("Flag file was created. Stopping the test.");
return true;
}
/**
* This is the main function - run threads to generate load on NN
* It starts the number of DFSClient threads as specified by
* the user.
* It stops all the threads when the specified elapsed time is passed.
*/
protected int generateLoadOnNN() throws InterruptedException {
int hostHashCode = hostname.hashCode();
if (seed == 0) {
r = new Random(System.currentTimeMillis()+hostHashCode);
} else {
r = new Random(seed+hostHashCode);
}
try {
fc = FileContext.getFileContext(getConf());
} catch (IOException ioe) {
System.err.println("Can not initialize the file system: " +
ioe.getLocalizedMessage());
return -1;
}
int status = initFileDirTables();
if (status != 0) {
return status;
}
barrier();
DFSClientThread[] threads = new DFSClientThread[numOfThreads];
for (int i=0; i<numOfThreads; i++) {
threads[i] = new DFSClientThread(i);
threads[i].start();
}
if (durations[0] > 0) {
if (durations.length == 1) {// There is a fixed run time
while (shouldRun) {
Thread.sleep(2000);
totalTime += 2;
if (totalTime >= durations[0] || stopFileCreated()) {
shouldRun = false;
}
}
} else {
// script run
while (shouldRun) {
Thread.sleep(durations[currentIndex] * 1000);
totalTime += durations[currentIndex];
// Are we on the final line of the script?
if ((currentIndex + 1) == durations.length || stopFileCreated()) {
shouldRun = false;
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Moving to index " + currentIndex + ": r = "
+ readProbs[currentIndex] + ", w = " + writeProbs
+ " for duration " + durations[currentIndex]);
}
currentIndex++;
}
}
}
}
if(LOG.isDebugEnabled()) {
LOG.debug("Done with testing. Waiting for threads to finish.");
}
boolean failed = false;
for (DFSClientThread thread : threads) {
thread.join();
for (int i=0; i<TOTAL_OP_TYPES; i++) {
executionTime[i] += thread.executionTime[i];
numOfOps[i] += thread.totalNumOfOps[i];
}
failed = failed || thread.failed;
}
int exitCode = 0;
if (failed) {
exitCode = -ERR_TEST_FAILED;
}
totalOps = 0;
for (int i=0; i<TOTAL_OP_TYPES; i++) {
totalOps += numOfOps[i];
}
return exitCode;
}
protected static void printResults(PrintStream out) throws UnsupportedFileSystemException {
out.println("Result of running LoadGenerator against fileSystem: " +
FileContext.getFileContext().getDefaultFileSystem().getUri());
if (numOfOps[OPEN] != 0) {
out.println("Average open execution time: " +
(double)executionTime[OPEN]/numOfOps[OPEN] + "ms");
}
if (numOfOps[LIST] != 0) {
out.println("Average list execution time: " +
(double)executionTime[LIST]/numOfOps[LIST] + "ms");
}
if (numOfOps[DELETE] != 0) {
out.println("Average deletion execution time: " +
(double)executionTime[DELETE]/numOfOps[DELETE] + "ms");
out.println("Average create execution time: " +
(double)executionTime[CREATE]/numOfOps[CREATE] + "ms");
out.println("Average write_close execution time: " +
(double)executionTime[WRITE_CLOSE]/numOfOps[WRITE_CLOSE] + "ms");
}
if (totalTime != 0) {
out.println("Average operations per second: " +
(double)totalOps/totalTime +"ops/s");
}
out.println();
}
/** Parse the command line arguments and initialize the data */
protected int parseArgs(boolean runAsMapReduce, String[] args) throws IOException {
try {
for (int i = 0; i < args.length; i++) { // parse command line
if (args[i].equals("-scriptFile")) {
scriptFile = args[++i];
if (durations[0] > 0) {
System.err.println("Can't specify elapsedTime and use script.");
return -1;
}
} else if (args[i].equals("-readProbability")) {
if (scriptFile != null) {
System.err.println("Can't specify probabilities and use script.");
return -1;
}
readProbs[0] = Double.parseDouble(args[++i]);
if (readProbs[0] < 0 || readProbs[0] > 1) {
System.err.println(
"The read probability must be [0, 1]: " + readProbs[0]);
return -1;
}
} else if (args[i].equals("-writeProbability")) {
if (scriptFile != null) {
System.err.println("Can't specify probabilities and use script.");
return -1;
}
writeProbs[0] = Double.parseDouble(args[++i]);
if (writeProbs[0] < 0 || writeProbs[0] > 1) {
System.err.println(
"The write probability must be [0, 1]: " + writeProbs[0]);
return -1;
}
} else if (args[i].equals("-root")) {
root = new Path(args[++i]);
} else if (args[i].equals("-maxDelayBetweenOps")) {
maxDelayBetweenOps = Integer.parseInt(args[++i]); // in milliseconds
} else if (args[i].equals("-numOfThreads")) {
numOfThreads = Integer.parseInt(args[++i]);
if (numOfThreads <= 0) {
System.err.println(
"Number of threads must be positive: " + numOfThreads);
return -1;
}
} else if (args[i].equals("-startTime")) {
startTime = Long.parseLong(args[++i]);
} else if (args[i].equals("-elapsedTime")) {
if (scriptFile != null) {
System.err.println("Can't specify elapsedTime and use script.");
return -1;
}
durations[0] = Long.parseLong(args[++i]);
} else if (args[i].equals("-seed")) {
seed = Long.parseLong(args[++i]);
r = new Random(seed);
} else if (args[i].equals("-flagFile")) {
LOG.info("got flagFile:" + flagFile);
flagFile = new Path(args[++i]);
}else {
System.err.println(USAGE);
ToolRunner.printGenericCommandUsage(System.err);
return -1;
}
}
} catch (NumberFormatException e) {
System.err.println("Illegal parameter: " + e.getLocalizedMessage());
System.err.println(USAGE);
return -1;
}
// Load Script File if not MR; for MR scriptFile is loaded by Mapper
if (!runAsMapReduce && scriptFile != null) {
      if (loadScriptFile(scriptFile, true) == -1) {
        return -1;
      }
}
for(int i = 0; i < readProbs.length; i++) {
      if (readProbs[i] + writeProbs[i] < 0 || readProbs[i] + writeProbs[i] > 1) {
System.err.println(
"The sum of read probability and write probability must be [0, 1]: "
+ readProbs[i] + " " + writeProbs[i]);
return -1;
}
}
return 0;
}
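
  /*
   * An illustrative invocation using the options parsed above (the paths
   * and values are hypothetical examples, not requirements):
   *
   *   hadoop org.apache.hadoop.fs.loadGenerator.LoadGenerator \
   *       -root /testLoadSpace -numOfThreads 4 \
   *       -readProbability 0.4 -writeProbability 0.3 -elapsedTime 600
   */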
private static void parseScriptLine(String line, ArrayList<Long> duration,
ArrayList<Double> readProb, ArrayList<Double> writeProb) {
String[] a = line.split("\\s");
if (a.length != 3) {
throw new IllegalArgumentException("Incorrect number of parameters: "
+ line);
}
try {
long d = Long.parseLong(a[0]);
double r = Double.parseDouble(a[1]);
double w = Double.parseDouble(a[2]);
Preconditions.checkArgument(d >= 0, "Invalid duration: " + d);
Preconditions.checkArgument(0 <= r && r <= 1.0,
"The read probability must be [0, 1]: " + r);
      Preconditions.checkArgument(0 <= w && w <= 1.0,
          "The write probability must be [0, 1]: " + w);
readProb.add(r);
duration.add(d);
writeProb.add(w);
} catch (NumberFormatException nfe) {
throw new IllegalArgumentException("Cannot parse: " + line);
}
}
/**
* Read a script file of the form: lines of text with duration in seconds,
* read probability and write probability, separated by white space.
*
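   * Lines starting with '#' and blank lines are skipped. A hypothetical
   * two-segment script (values are illustrative only):
   * <pre>
   * # 5 minutes of mostly reads, then 10 minutes of mixed load
   * 300 0.7 0.2
   * 600 0.4 0.4
   * </pre>
   * A first-line duration of 0 makes the generator run until stopped
   * manually.
   *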
   * @param filename script file
   * @param readLocally if true, read the script from the local file system;
   *                    otherwise read it from the default file system
   * @return 0 if successful, -1 if not
* @throws IOException if errors with file IO
*/
protected static int loadScriptFile(String filename, boolean readLocally) throws IOException {
FileContext fc;
if (readLocally) { // read locally - program is run without MR
fc = FileContext.getLocalFSFileContext();
} else {
fc = FileContext.getFileContext(); // use default file system
}
    DataInputStream in;
    try {
      in = fc.open(new Path(filename));
    } catch (IOException e) {
      // Fail with the documented error code rather than exiting the JVM.
      System.err.println("Unable to open scriptFile: " + filename);
      return -1;
    }
InputStreamReader inr = new InputStreamReader(in);
BufferedReader br = new BufferedReader(inr);
ArrayList<Long> duration = new ArrayList<Long>();
ArrayList<Double> readProb = new ArrayList<Double>();
ArrayList<Double> writeProb = new ArrayList<Double>();
int lineNum = 0;
String line;
// Read script, parse values, build array of duration, read and write probs
try {
while ((line = br.readLine()) != null) {
lineNum++;
if (line.startsWith("#") || line.isEmpty()) // skip comments and blanks
continue;
parseScriptLine(line, duration, readProb, writeProb);
}
} catch (IllegalArgumentException e) {
System.err.println("Line: " + lineNum + ", " + e.getMessage());
return -1;
} finally {
IOUtils.cleanup(LOG, br);
}
// Copy vectors to arrays of values, to avoid autoboxing overhead later
durations = new long[duration.size()];
readProbs = new double[readProb.size()];
writeProbs = new double[writeProb.size()];
for(int i = 0; i < durations.length; i++) {
durations[i] = duration.get(i);
readProbs[i] = readProb.get(i);
writeProbs[i] = writeProb.get(i);
}
    if (durations[0] == 0) {
      System.err.println("Initial duration set to 0. " +
          "Will loop until stopped manually.");
    }
return 0;
}
/** Create a table that contains all directories under root and
* another table that contains all files under root.
*/
private int initFileDirTables() {
try {
initFileDirTables(root);
} catch (IOException e) {
System.err.println(e.getLocalizedMessage());
e.printStackTrace();
return -1;
}
if (dirs.isEmpty()) {
System.err.println("The test space " + root + " is empty");
return -1;
}
if (files.isEmpty()) {
System.err.println("The test space " + root +
" does not have any file");
return -1;
}
return 0;
}
/** Create a table that contains all directories under the specified path and
* another table that contains all files under the specified path and
* whose name starts with "_file_".
*/
private void initFileDirTables(Path path) throws IOException {
FileStatus[] stats = fc.util().listStatus(path);
for (FileStatus stat : stats) {
if (stat.isDirectory()) {
dirs.add(stat.getPath().toString());
initFileDirTables(stat.getPath());
} else {
Path filePath = stat.getPath();
if (filePath.getName().startsWith(StructureGenerator.FILE_NAME_PREFIX)) {
files.add(filePath.toString());
}
}
}
}
/** Returns when the current number of seconds from the epoch equals
* the command line argument given by <code>-startTime</code>.
* This allows multiple instances of this program, running on clock
* synchronized nodes, to start at roughly the same time.
*/
private static void barrier() {
long sleepTime;
while ((sleepTime = startTime - Time.now()) > 0) {
try {
Thread.sleep(sleepTime);
} catch (InterruptedException ex) {
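        // Ignore the interrupt and re-check the remaining wait time.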
}
}
}
/** Main program
*
* @param args command line arguments
* @throws Exception
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new LoadGenerator(), args);
System.exit(res);
}
}