repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import static org.junit.Assert.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import org.junit.Test;
public class TestRandomAlgorithm {
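// each parameter row is {niter, m, n}: run niter rounds, each selecting m items out of n candidates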
private static final int[][] parameters = new int[][] {
{5, 1, 1},
{10, 1, 2},
{10, 2, 2},
{20, 1, 3},
{20, 2, 3},
{20, 3, 3},
{100, 3, 10},
{100, 3, 100},
{100, 3, 1000},
{100, 3, 10000},
{100, 3, 100000},
{100, 3, 1000000}
};
private List<Integer> convertIntArray(int[] from) {
List<Integer> ret = new ArrayList<Integer>(from.length);
for (int v : from) {
ret.add(v);
}
return ret;
}
private void testRandomSelectSelector(int niter, int m, int n) {
RandomAlgorithms.Selector selector = new RandomAlgorithms.Selector(n,
(double) m / n, new Random());
Map<List<Integer>, Integer> results = new HashMap<List<Integer>, Integer>(
niter);
for (int i = 0; i < niter; ++i, selector.reset()) {
int[] result = new int[m];
for (int j = 0; j < m; ++j) {
int v = selector.next();
if (v < 0)
break;
result[j]=v;
}
Arrays.sort(result);
List<Integer> resultAsList = convertIntArray(result);
Integer count = results.get(resultAsList);
if (count == null) {
results.put(resultAsList, 1);
} else {
results.put(resultAsList, ++count);
}
}
verifyResults(results, m, n);
}
private void testRandomSelect(int niter, int m, int n) {
Random random = new Random();
Map<List<Integer>, Integer> results = new HashMap<List<Integer>, Integer>(
niter);
for (int i = 0; i < niter; ++i) {
int[] result = RandomAlgorithms.select(m, n, random);
Arrays.sort(result);
List<Integer> resultAsList = convertIntArray(result);
Integer count = results.get(resultAsList);
if (count == null) {
results.put(resultAsList, 1);
} else {
results.put(resultAsList, ++count);
}
}
verifyResults(results, m, n);
}
private void verifyResults(Map<List<Integer>, Integer> results, int m, int n) {
if (n>=10) {
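// with a candidate pool of at least 10, repeated rounds should yield at least two distinct selections (one when m == 1)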
assertTrue(results.size() >= Math.min(m, 2));
}
for (List<Integer> result : results.keySet()) {
assertEquals(m, result.size());
Set<Integer> seen = new HashSet<Integer>();
for (int v : result) {
System.out.printf("%d ", v);
assertTrue((v >= 0) && (v < n));
assertTrue(seen.add(v));
}
System.out.printf(" ==> %d\n", results.get(result));
}
System.out.println("====");
}
@Test
public void testRandomSelect() {
for (int[] param : parameters) {
testRandomSelect(param[0], param[1], param[2]);
}
}
@Test
public void testRandomSelectSelector() {
for (int[] param : parameters) {
testRandomSelectSelector(param[0], param[1], param[2]);
}
}
}
| 3,889 | 28.24812 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestFileQueue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Arrays;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
public class TestFileQueue {
static final Log LOG = LogFactory.getLog(TestFileQueue.class);
static final int NFILES = 4;
static final int BLOCK = 256;
static final Path[] paths = new Path[NFILES];
static final String[] loc = new String[NFILES];
static final long[] start = new long[NFILES];
static final long[] len = new long[NFILES];
@BeforeClass
public static void setup() throws IOException {
final Configuration conf = new Configuration();
final FileSystem fs = FileSystem.getLocal(conf).getRaw();
final Path p = new Path(System.getProperty("test.build.data", "/tmp"),
"testFileQueue").makeQualified(fs);
fs.delete(p, true);
final byte[] b = new byte[BLOCK];
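// write NFILES single-block files, each filled with a distinct byte value ('A', 'B', 'C', 'D')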
for (int i = 0; i < NFILES; ++i) {
Arrays.fill(b, (byte)('A' + i));
paths[i] = new Path(p, "" + (char)('A' + i));
OutputStream f = null;
try {
f = fs.create(paths[i]);
f.write(b);
} finally {
if (f != null) {
f.close();
}
}
}
}
@AfterClass
public static void cleanup() throws IOException {
final Configuration conf = new Configuration();
final FileSystem fs = FileSystem.getLocal(conf).getRaw();
final Path p = new Path(System.getProperty("test.build.data", "/tmp"),
"testFileQueue").makeQualified(fs);
fs.delete(p, true);
}
static ByteArrayOutputStream fillVerif() throws IOException {
final byte[] b = new byte[BLOCK];
final ByteArrayOutputStream out = new ByteArrayOutputStream();
for (int i = 0; i < NFILES; ++i) {
Arrays.fill(b, (byte)('A' + i));
out.write(b, 0, (int)len[i]);
}
return out;
}
@Test
public void testRepeat() throws Exception {
final Configuration conf = new Configuration();
Arrays.fill(loc, "");
Arrays.fill(start, 0L);
Arrays.fill(len, BLOCK);
final ByteArrayOutputStream out = fillVerif();
final FileQueue q =
new FileQueue(new CombineFileSplit(paths, start, len, loc), conf);
final byte[] verif = out.toByteArray();
final byte[] check = new byte[2 * NFILES * BLOCK];
q.read(check, 0, NFILES * BLOCK);
assertArrayEquals(verif, Arrays.copyOf(check, NFILES * BLOCK));
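// the queue should wrap around, so a double-length read returns the concatenated file contents twice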
final byte[] verif2 = new byte[2 * NFILES * BLOCK];
System.arraycopy(verif, 0, verif2, 0, verif.length);
System.arraycopy(verif, 0, verif2, verif.length, verif.length);
q.read(check, 0, 2 * NFILES * BLOCK);
assertArrayEquals(verif2, check);
}
@Test
public void testUneven() throws Exception {
final Configuration conf = new Configuration();
Arrays.fill(loc, "");
Arrays.fill(start, 0L);
Arrays.fill(len, BLOCK);
final int B2 = BLOCK / 2;
for (int i = 0; i < NFILES; i += 2) {
start[i] += B2;
len[i] -= B2;
}
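// even-indexed splits now start at mid-file and cover only B2 bytes, so one pass yields NFILES/2 full blocks plus NFILES/2 half blocks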
final FileQueue q =
new FileQueue(new CombineFileSplit(paths, start, len, loc), conf);
final ByteArrayOutputStream out = fillVerif();
final byte[] verif = out.toByteArray();
final byte[] check = new byte[NFILES / 2 * BLOCK + NFILES / 2 * B2];
q.read(check, 0, verif.length);
assertArrayEquals(verif, Arrays.copyOf(check, verif.length));
q.read(check, 0, verif.length);
assertArrayEquals(verif, Arrays.copyOf(check, verif.length));
}
@Test
public void testEmpty() throws Exception {
final Configuration conf = new Configuration();
// verify that construction succeeds even if the queue is never used
final FileQueue q = new FileQueue(new CombineFileSplit(
new Path[0], new long[0], new long[0], new String[0]), conf);
}
}
| 4,888 | 32.951389 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixSummary.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import static org.junit.Assert.*;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.UtilsForTests;
import org.apache.hadoop.mapred.gridmix.GenerateData.DataStatistics;
import org.apache.hadoop.mapred.gridmix.Statistics.ClusterStats;
import org.apache.hadoop.mapred.gridmix.Statistics.JobStats;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.tools.rumen.JobStory;
import org.apache.hadoop.tools.rumen.JobStoryProducer;
import org.junit.Test;
/**
* Test {@link ExecutionSummarizer} and {@link ClusterSummarizer}.
*/
public class TestGridmixSummary {
/**
* Test {@link DataStatistics}.
*/
@Test (timeout=20000)
public void testDataStatistics() throws Exception {
// test data-statistics getters with compression enabled
DataStatistics stats = new DataStatistics(10, 2, true);
assertEquals("Data size mismatch", 10, stats.getDataSize());
assertEquals("Num files mismatch", 2, stats.getNumFiles());
assertTrue("Compression configuration mismatch", stats.isDataCompressed());
// test data-statistics getters with compression disabled
stats = new DataStatistics(100, 5, false);
assertEquals("Data size mismatch", 100, stats.getDataSize());
assertEquals("Num files mismatch", 5, stats.getNumFiles());
assertFalse("Compression configuration mismatch", stats.isDataCompressed());
// test publish data stats
Configuration conf = new Configuration();
Path rootTempDir = new Path(System.getProperty("test.build.data", "/tmp"));
Path testDir = new Path(rootTempDir, "testDataStatistics");
FileSystem fs = testDir.getFileSystem(conf);
fs.delete(testDir, true);
Path testInputDir = new Path(testDir, "test");
fs.mkdirs(testInputDir);
// test empty folder (compression = true)
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
Boolean failed = null;
try {
GenerateData.publishDataStatistics(testInputDir, 1024L, conf);
failed = false;
} catch (RuntimeException e) {
failed = true;
}
assertNotNull("Expected failure!", failed);
assertTrue("Compression data publishing error", failed);
// test with empty folder (compression = off)
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, false);
stats = GenerateData.publishDataStatistics(testInputDir, 1024L, conf);
assertEquals("Data size mismatch", 0, stats.getDataSize());
assertEquals("Num files mismatch", 0, stats.getNumFiles());
assertFalse("Compression configuration mismatch", stats.isDataCompressed());
// test with some plain input data (compression = off)
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, false);
Path inputDataFile = new Path(testInputDir, "test");
long size =
UtilsForTests.createTmpFileDFS(fs, inputDataFile,
FsPermission.createImmutable((short)777), "hi hello bye").size();
stats = GenerateData.publishDataStatistics(testInputDir, -1, conf);
assertEquals("Data size mismatch", size, stats.getDataSize());
assertEquals("Num files mismatch", 1, stats.getNumFiles());
assertFalse("Compression configuration mismatch", stats.isDataCompressed());
// test with some plain input data (compression = on)
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
failed = null;
try {
GenerateData.publishDataStatistics(testInputDir, 1234L, conf);
failed = false;
} catch (RuntimeException e) {
failed = true;
}
assertNotNull("Expected failure!", failed);
assertTrue("Compression data publishing error", failed);
// test with some compressed input data (compression = off)
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, false);
fs.delete(inputDataFile, false);
inputDataFile = new Path(testInputDir, "test.gz");
size =
UtilsForTests.createTmpFileDFS(fs, inputDataFile,
FsPermission.createImmutable((short)777), "hi hello").size();
stats = GenerateData.publishDataStatistics(testInputDir, 1234L, conf);
assertEquals("Data size mismatch", size, stats.getDataSize());
assertEquals("Num files mismatch", 1, stats.getNumFiles());
assertFalse("Compression configuration mismatch", stats.isDataCompressed());
// test with some compressed input data (compression = on)
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
stats = GenerateData.publishDataStatistics(testInputDir, 1234L, conf);
assertEquals("Data size mismatch", size, stats.getDataSize());
assertEquals("Num files mismatch", 1, stats.getNumFiles());
assertTrue("Compression configuration mismatch", stats.isDataCompressed());
}
/**
* A fake {@link JobFactory}.
*/
@SuppressWarnings("rawtypes")
private static class FakeJobFactory extends JobFactory {
/**
* A fake {@link JobStoryProducer} for {@link FakeJobFactory}.
*/
private static class FakeJobStoryProducer implements JobStoryProducer {
@Override
public void close() throws IOException {
}
@Override
public JobStory getNextJob() throws IOException {
return null;
}
}
FakeJobFactory(Configuration conf) {
super(null, new FakeJobStoryProducer(), null, conf, null, null);
}
@Override
public void update(Object item) {
}
@Override
protected Thread createReaderThread() {
return new Thread();
}
}
/**
* Test {@link ExecutionSummarizer}.
*/
@Test (timeout=20000)
@SuppressWarnings({ "unchecked", "rawtypes" })
public void testExecutionSummarizer() throws IOException {
Configuration conf = new Configuration();
ExecutionSummarizer es = new ExecutionSummarizer();
assertEquals("ExecutionSummarizer init failed",
Summarizer.NA, es.getCommandLineArgsString());
long startTime = System.currentTimeMillis();
// test configuration parameters
String[] initArgs = new String[] {"-Xmx20m", "-Dtest.args='test'"};
es = new ExecutionSummarizer(initArgs);
assertEquals("ExecutionSummarizer init failed",
"-Xmx20m -Dtest.args='test'",
es.getCommandLineArgsString());
// test start time
assertTrue("Start time mismatch", es.getStartTime() >= startTime);
assertTrue("Start time mismatch",
es.getStartTime() <= System.currentTimeMillis());
// test start() of ExecutionSummarizer
es.update(null);
assertEquals("ExecutionSummarizer init failed", 0,
es.getSimulationStartTime());
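// expected counters below are (maps, reduces, jobs-in-trace, submitted, successful, failed, lost) and accumulate across updates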
testExecutionSummarizer(0, 0, 0, 0, 0, 0, 0, es);
long simStartTime = System.currentTimeMillis();
es.start(null);
assertTrue("Simulation start time mismatch",
es.getSimulationStartTime() >= simStartTime);
assertTrue("Simulation start time mismatch",
es.getSimulationStartTime() <= System.currentTimeMillis());
// test with job stats
JobStats stats = generateFakeJobStats(1, 10, true, false);
es.update(stats);
testExecutionSummarizer(1, 10, 0, 1, 1, 0, 0, es);
// test with failed job
stats = generateFakeJobStats(5, 1, false, false);
es.update(stats);
testExecutionSummarizer(6, 11, 0, 2, 1, 1, 0, es);
// test with successful but lost job
stats = generateFakeJobStats(1, 1, true, true);
es.update(stats);
testExecutionSummarizer(7, 12, 0, 3, 1, 1, 1, es);
// test with failed but lost job
stats = generateFakeJobStats(2, 2, false, true);
es.update(stats);
testExecutionSummarizer(9, 14, 0, 4, 1, 1, 2, es);
// test finalize
// define a fake job factory
JobFactory factory = new FakeJobFactory(conf);
// fake the num jobs in trace
factory.numJobsInTrace = 3;
Path rootTempDir = new Path(System.getProperty("test.build.data", "/tmp"));
Path testDir = new Path(rootTempDir, "testGridmixSummary");
Path testTraceFile = new Path(testDir, "test-trace.json");
FileSystem fs = FileSystem.getLocal(conf);
fs.create(testTraceFile).close();
// finalize the summarizer
UserResolver resolver = new RoundRobinUserResolver();
DataStatistics dataStats = new DataStatistics(100, 2, true);
String policy = GridmixJobSubmissionPolicy.REPLAY.name();
conf.set(GridmixJobSubmissionPolicy.JOB_SUBMISSION_POLICY, policy);
es.finalize(factory, testTraceFile.toString(), 1024L, resolver, dataStats,
conf);
// test num jobs in trace
assertEquals("Mismtach in num jobs in trace", 3, es.getNumJobsInTrace());
// test trace signature
String tid =
ExecutionSummarizer.getTraceSignature(testTraceFile.toString());
assertEquals("Mismatch in trace signature",
tid, es.getInputTraceSignature());
// test trace location
Path qPath = fs.makeQualified(testTraceFile);
assertEquals("Mismatch in trace filename",
qPath.toString(), es.getInputTraceLocation());
// test expected data size
assertEquals("Mismatch in expected data size",
"1 K", es.getExpectedDataSize());
// test input data statistics
assertEquals("Mismatch in input data statistics",
ExecutionSummarizer.stringifyDataStatistics(dataStats),
es.getInputDataStatistics());
// test user resolver
assertEquals("Mismatch in user resolver",
resolver.getClass().getName(), es.getUserResolver());
// test policy
assertEquals("Mismatch in policy", policy, es.getJobSubmissionPolicy());
// test data stringification using large data
es.finalize(factory, testTraceFile.toString(), 1024*1024*1024*10L, resolver,
dataStats, conf);
assertEquals("Mismatch in expected data size",
"10 G", es.getExpectedDataSize());
// test trace signature uniqueness
// touch the trace file
fs.delete(testTraceFile, false);
// sleep for 1 sec
try {
Thread.sleep(1000);
} catch (InterruptedException ie) {}
fs.create(testTraceFile).close();
es.finalize(factory, testTraceFile.toString(), 0L, resolver, dataStats,
conf);
// test missing expected data size
assertEquals("Mismatch in trace data size",
Summarizer.NA, es.getExpectedDataSize());
assertFalse("Mismatch in trace signature",
tid.equals(es.getInputTraceSignature()));
// get the new identifier
tid = ExecutionSummarizer.getTraceSignature(testTraceFile.toString());
assertEquals("Mismatch in trace signature",
tid, es.getInputTraceSignature());
testTraceFile = new Path(testDir, "test-trace2.json");
fs.create(testTraceFile).close();
es.finalize(factory, testTraceFile.toString(), 0L, resolver, dataStats,
conf);
assertFalse("Mismatch in trace signature",
tid.equals(es.getInputTraceSignature()));
// get the new identifier
tid = ExecutionSummarizer.getTraceSignature(testTraceFile.toString());
assertEquals("Mismatch in trace signature",
tid, es.getInputTraceSignature());
// finalize trace identifier '-' input
es.finalize(factory, "-", 0L, resolver, dataStats, conf);
assertEquals("Mismatch in trace signature",
Summarizer.NA, es.getInputTraceSignature());
assertEquals("Mismatch in trace file location",
Summarizer.NA, es.getInputTraceLocation());
}
// test the ExecutionSummarizer
private static void testExecutionSummarizer(int numMaps, int numReds,
int totalJobsInTrace, int totalJobSubmitted, int numSuccessfulJob,
int numFailedJobs, int numLostJobs, ExecutionSummarizer es) {
assertEquals("ExecutionSummarizer test failed [num-maps]",
numMaps, es.getNumMapTasksLaunched());
assertEquals("ExecutionSummarizer test failed [num-reducers]",
numReds, es.getNumReduceTasksLaunched());
assertEquals("ExecutionSummarizer test failed [num-jobs-in-trace]",
totalJobsInTrace, es.getNumJobsInTrace());
assertEquals("ExecutionSummarizer test failed [num-submitted jobs]",
totalJobSubmitted, es.getNumSubmittedJobs());
assertEquals("ExecutionSummarizer test failed [num-successful-jobs]",
numSuccessfulJob, es.getNumSuccessfulJobs());
assertEquals("ExecutionSummarizer test failed [num-failed jobs]",
numFailedJobs, es.getNumFailedJobs());
assertEquals("ExecutionSummarizer test failed [num-lost jobs]",
numLostJobs, es.getNumLostJobs());
}
// generate fake job stats
@SuppressWarnings("deprecation")
private static JobStats generateFakeJobStats(final int numMaps,
final int numReds, final boolean isSuccessful, final boolean lost)
throws IOException {
// A fake job
Job fakeJob = new Job() {
@Override
public int getNumReduceTasks() {
return numReds;
};
@Override
public boolean isSuccessful() throws IOException {
if (lost) {
throw new IOException("Test failure!");
}
return isSuccessful;
};
};
return new JobStats(numMaps, numReds, fakeJob);
}
/**
* Test {@link ClusterSummarizer}.
*/
@Test (timeout=20000)
public void testClusterSummarizer() throws IOException {
ClusterSummarizer cs = new ClusterSummarizer();
Configuration conf = new Configuration();
String jt = "test-jt:1234";
String nn = "test-nn:5678";
conf.set(JTConfig.JT_IPC_ADDRESS, jt);
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, nn);
cs.start(conf);
assertEquals("JT name mismatch", jt, cs.getJobTrackerInfo());
assertEquals("NN name mismatch", nn, cs.getNamenodeInfo());
ClusterStats cStats = ClusterStats.getClusterStats();
conf.set(JTConfig.JT_IPC_ADDRESS, "local");
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "local");
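// against the local job runner the reported cluster status has a single tracker with one map slot and one reduce slot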
JobClient jc = new JobClient(conf);
cStats.setClusterMetric(jc.getClusterStatus());
cs.update(cStats);
// test
assertEquals("Cluster summary test failed!", 1, cs.getMaxMapTasks());
assertEquals("Cluster summary test failed!", 1, cs.getMaxReduceTasks());
assertEquals("Cluster summary test failed!", 1, cs.getNumActiveTrackers());
assertEquals("Cluster summary test failed!", 0,
cs.getNumBlacklistedTrackers());
}
}
| 15,806 | 39.32398 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridMixClasses.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.CustomOutputCommitter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobContext;
import org.apache.hadoop.mapred.RawKeyValueIterator;
import org.apache.hadoop.mapred.gridmix.GridmixKey.Spec;
import org.apache.hadoop.mapred.gridmix.SleepJob.SleepReducer;
import org.apache.hadoop.mapred.gridmix.SleepJob.SleepSplit;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.MapContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.ReduceContext;
import org.apache.hadoop.mapreduce.StatusReporter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.Mapper.Context;
import org.apache.hadoop.mapreduce.counters.GenericCounter;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.hadoop.mapreduce.lib.map.WrappedMapper;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.apache.hadoop.mapreduce.task.ReduceContextImpl;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl.DummyReporter;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.rumen.JobStory;
import org.apache.hadoop.tools.rumen.JobStoryProducer;
import org.apache.hadoop.tools.rumen.ResourceUsageMetrics;
import org.apache.hadoop.tools.rumen.ZombieJobProducer;
import org.apache.hadoop.util.Progress;
import org.junit.Assert;
import org.junit.Test;
import static org.mockito.Mockito.*;
import static org.junit.Assert.*;
public class TestGridMixClasses {
private static final Log LOG = LogFactory.getLog(TestGridMixClasses.class);
/*
* simple test LoadSplit (getters, copy, write, read...)
*/
@Test (timeout=1000)
public void testLoadSplit() throws Exception {
LoadSplit test = getLoadSplit();
ByteArrayOutputStream data = new ByteArrayOutputStream();
DataOutputStream out = new DataOutputStream(data);
test.write(out);
LoadSplit copy = new LoadSplit();
copy.readFields(new DataInputStream(new ByteArrayInputStream(data
.toByteArray())));
// data should be the same
assertEquals(test.getId(), copy.getId());
assertEquals(test.getMapCount(), copy.getMapCount());
assertEquals(test.getInputRecords(), copy.getInputRecords());
assertEquals(test.getOutputBytes()[0], copy.getOutputBytes()[0]);
assertEquals(test.getOutputRecords()[0], copy.getOutputRecords()[0]);
assertEquals(test.getReduceBytes(0), copy.getReduceBytes(0));
assertEquals(test.getReduceRecords(0), copy.getReduceRecords(0));
assertEquals(test.getMapResourceUsageMetrics().getCumulativeCpuUsage(),
copy.getMapResourceUsageMetrics().getCumulativeCpuUsage());
assertEquals(test.getReduceResourceUsageMetrics(0).getCumulativeCpuUsage(),
copy.getReduceResourceUsageMetrics(0).getCumulativeCpuUsage());
}
/*
* simple test GridmixSplit (copy, getters, write, read..)
*/
@Test (timeout=1000)
public void testGridmixSplit() throws Exception {
Path[] files = {new Path("one"), new Path("two")};
long[] start = {1, 2};
long[] lengths = {100, 200};
String[] locations = {"locOne", "loctwo"};
CombineFileSplit cfSplit = new CombineFileSplit(files, start, lengths,
locations);
ResourceUsageMetrics metrics = new ResourceUsageMetrics();
metrics.setCumulativeCpuUsage(200);
double[] reduceBytes = {8.1d, 8.2d};
double[] reduceRecords = {9.1d, 9.2d};
long[] reduceOutputBytes = {101L, 102L};
long[] reduceOutputRecords = {111L, 112L};
GridmixSplit test = new GridmixSplit(cfSplit, 2, 3, 4L, 5L, 6L, 7L,
reduceBytes, reduceRecords, reduceOutputBytes, reduceOutputRecords);
ByteArrayOutputStream data = new ByteArrayOutputStream();
DataOutputStream out = new DataOutputStream(data);
test.write(out);
GridmixSplit copy = new GridmixSplit();
copy.readFields(new DataInputStream(new ByteArrayInputStream(data
.toByteArray())));
// data should be the same
assertEquals(test.getId(), copy.getId());
assertEquals(test.getMapCount(), copy.getMapCount());
assertEquals(test.getInputRecords(), copy.getInputRecords());
assertEquals(test.getOutputBytes()[0], copy.getOutputBytes()[0]);
assertEquals(test.getOutputRecords()[0], copy.getOutputRecords()[0]);
assertEquals(test.getReduceBytes(0), copy.getReduceBytes(0));
assertEquals(test.getReduceRecords(0), copy.getReduceRecords(0));
}
/*
* test LoadMapper: LoadMapper should write one record to the writer for each reduce
*/
@SuppressWarnings({"rawtypes", "unchecked"})
@Test (timeout=10000)
public void testLoadMapper() throws Exception {
Configuration conf = new Configuration();
conf.setInt(JobContext.NUM_REDUCES, 2);
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
TaskAttemptID taskId = new TaskAttemptID();
RecordReader<NullWritable, GridmixRecord> reader = new FakeRecordReader();
LoadRecordGkGrWriter writer = new LoadRecordGkGrWriter();
OutputCommitter committer = new CustomOutputCommitter();
StatusReporter reporter = new TaskAttemptContextImpl.DummyReporter();
LoadSplit split = getLoadSplit();
MapContext<NullWritable, GridmixRecord, GridmixKey, GridmixRecord> mapContext = new MapContextImpl<NullWritable, GridmixRecord, GridmixKey, GridmixRecord>(
conf, taskId, reader, writer, committer, reporter, split);
// context
Context ctx = new WrappedMapper<NullWritable, GridmixRecord, GridmixKey, GridmixRecord>()
.getMapContext(mapContext);
reader.initialize(split, ctx);
ctx.getConfiguration().setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
CompressionEmulationUtil.setCompressionEmulationEnabled(
ctx.getConfiguration(), true);
LoadJob.LoadMapper mapper = new LoadJob.LoadMapper();
// setup, map, clean
mapper.run(ctx);
Map<GridmixKey, GridmixRecord> data = writer.getData();
// check result
assertEquals(2, data.size());
}
private LoadSplit getLoadSplit() throws Exception {
Path[] files = {new Path("one"), new Path("two")};
long[] start = {1, 2};
long[] lengths = {100, 200};
String[] locations = {"locOne", "loctwo"};
CombineFileSplit cfSplit = new CombineFileSplit(files, start, lengths,
locations);
ResourceUsageMetrics metrics = new ResourceUsageMetrics();
metrics.setCumulativeCpuUsage(200);
ResourceUsageMetrics[] rMetrics = {metrics};
double[] reduceBytes = {8.1d, 8.2d};
double[] reduceRecords = {9.1d, 9.2d};
long[] reduceOutputBytes = {101L, 102L};
long[] reduceOutputRecords = {111L, 112L};
return new LoadSplit(cfSplit, 2, 1, 4L, 5L, 6L, 7L,
reduceBytes, reduceRecords, reduceOutputBytes, reduceOutputRecords,
metrics, rMetrics);
}
private class FakeRecordLLReader extends
RecordReader<LongWritable, LongWritable> {
int counter = 10;
@Override
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
counter--;
return counter > 0;
}
@Override
public LongWritable getCurrentKey() throws IOException,
InterruptedException {
return new LongWritable(counter);
}
@Override
public LongWritable getCurrentValue() throws IOException,
InterruptedException {
return new LongWritable(counter * 10);
}
@Override
public float getProgress() throws IOException, InterruptedException {
return counter / 10.0f;
}
@Override
public void close() throws IOException {
// restore data
counter = 10;
}
}
private class FakeRecordReader extends
RecordReader<NullWritable, GridmixRecord> {
int counter = 10;
@Override
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
counter--;
return counter > 0;
}
@Override
public NullWritable getCurrentKey() throws IOException,
InterruptedException {
return NullWritable.get();
}
@Override
public GridmixRecord getCurrentValue() throws IOException,
InterruptedException {
return new GridmixRecord(100, 100L);
}
@Override
public float getProgress() throws IOException, InterruptedException {
return counter / 10.0f;
}
@Override
public void close() throws IOException {
// restore data
counter = 10;
}
}
private class LoadRecordGkGrWriter extends
RecordWriter<GridmixKey, GridmixRecord> {
private Map<GridmixKey, GridmixRecord> data = new HashMap<GridmixKey, GridmixRecord>();
@Override
public void write(GridmixKey key, GridmixRecord value) throws IOException,
InterruptedException {
data.put(key, value);
}
@Override
public void close(TaskAttemptContext context) throws IOException,
InterruptedException {
}
public Map<GridmixKey, GridmixRecord> getData() {
return data;
}
}
private class LoadRecordGkNullWriter extends
RecordWriter<GridmixKey, NullWritable> {
private Map<GridmixKey, NullWritable> data = new HashMap<GridmixKey, NullWritable>();
@Override
public void write(GridmixKey key, NullWritable value) throws IOException,
InterruptedException {
data.put(key, value);
}
@Override
public void close(TaskAttemptContext context) throws IOException,
InterruptedException {
}
public Map<GridmixKey, NullWritable> getData() {
return data;
}
}
private class LoadRecordWriter extends
RecordWriter<NullWritable, GridmixRecord> {
private Map<NullWritable, GridmixRecord> data = new HashMap<NullWritable, GridmixRecord>();
@Override
public void write(NullWritable key, GridmixRecord value)
throws IOException, InterruptedException {
data.put(key, value);
}
@Override
public void close(TaskAttemptContext context) throws IOException,
InterruptedException {
}
public Map<NullWritable, GridmixRecord> getData() {
return data;
}
}
/*
* test LoadSortComparator
*/
@Test (timeout=3000)
public void testLoadJobLoadSortComparator() throws Exception {
LoadJob.LoadSortComparator test = new LoadJob.LoadSortComparator();
ByteArrayOutputStream data = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(data);
WritableUtils.writeVInt(dos, 2);
WritableUtils.writeVInt(dos, 1);
WritableUtils.writeVInt(dos, 4);
WritableUtils.writeVInt(dos, 7);
WritableUtils.writeVInt(dos, 4);
byte[] b1 = data.toByteArray();
byte[] b2 = data.toByteArray();
// the same data should compare as equal
assertEquals(0, test.compare(b1, 0, 1, b2, 0, 1));
b2[2] = 5;
// compare like GridmixKey first byte: shift count -1 = 4 - 5
assertEquals(-1, test.compare(b1, 0, 1, b2, 0, 1));
b2[2] = 2;
// compare like GridmixKey first byte: shift count 2 = 4 - 2
assertEquals(2, test.compare(b1, 0, 1, b2, 0, 1));
// compare arrays by first byte with offset (2 - 1) because 4 == 4
b2[2] = 4;
assertEquals(1, test.compare(b1, 0, 1, b2, 1, 1));
}
/*
* test SpecGroupingComparator
*/
@Test (timeout=3000)
public void testGridmixJobSpecGroupingComparator() throws Exception {
GridmixJob.SpecGroupingComparator test = new GridmixJob.SpecGroupingComparator();
ByteArrayOutputStream data = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(data);
WritableUtils.writeVInt(dos, 2);
WritableUtils.writeVInt(dos, 1);
// 0: REDUCE SPEC
WritableUtils.writeVInt(dos, 0);
WritableUtils.writeVInt(dos, 7);
WritableUtils.writeVInt(dos, 4);
byte[] b1 = data.toByteArray();
byte[] b2 = data.toByteArray();
// the same data should compare as equal
assertEquals(0, test.compare(b1, 0, 1, b2, 0, 1));
b2[2] = 1;
// for Reduce
assertEquals(-1, test.compare(b1, 0, 1, b2, 0, 1));
// by Reduce spec
b2[2] = 1; // 1: DATA SPEC
assertEquals(-1, test.compare(b1, 0, 1, b2, 0, 1));
// comparing GridmixKey objects: equal keys should compare as equal
assertEquals(0, test.compare(new GridmixKey(GridmixKey.DATA, 100, 2),
new GridmixKey(GridmixKey.DATA, 100, 2)));
// REDUCE SPEC
assertEquals(-1, test.compare(
new GridmixKey(GridmixKey.REDUCE_SPEC, 100, 2), new GridmixKey(
GridmixKey.DATA, 100, 2)));
assertEquals(1, test.compare(new GridmixKey(GridmixKey.DATA, 100, 2),
new GridmixKey(GridmixKey.REDUCE_SPEC, 100, 2)));
// only DATA
assertEquals(2, test.compare(new GridmixKey(GridmixKey.DATA, 102, 2),
new GridmixKey(GridmixKey.DATA, 100, 2)));
}
/*
* test CompareGridmixJob only equals and compare
*/
@Test (timeout=30000)
public void testCompareGridmixJob() throws Exception {
Configuration conf = new Configuration();
Path outRoot = new Path("target");
JobStory jobDesc = mock(JobStory.class);
when(jobDesc.getName()).thenReturn("JobName");
when(jobDesc.getJobConf()).thenReturn(new JobConf(conf));
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
GridmixJob j1 = new LoadJob(conf, 1000L, jobDesc, outRoot, ugi, 0);
GridmixJob j2 = new LoadJob(conf, 1000L, jobDesc, outRoot, ugi, 0);
GridmixJob j3 = new LoadJob(conf, 1000L, jobDesc, outRoot, ugi, 1);
GridmixJob j4 = new LoadJob(conf, 1000L, jobDesc, outRoot, ugi, 1);
assertTrue(j1.equals(j2));
assertEquals(0, j1.compareTo(j2));
// Only one parameter matters
assertFalse(j1.equals(j3));
// compare id and submissionMillis
assertEquals(-1, j1.compareTo(j3));
assertEquals(-1, j1.compareTo(j4));
}
/*
* test ReadRecordFactory. It should read all data from the input stream.
*/
@Test (timeout=3000)
public void testReadRecordFactory() throws Exception {
// RecordFactory factory, InputStream src, Configuration conf
RecordFactory rf = new FakeRecordFactory();
FakeInputStream input = new FakeInputStream();
ReadRecordFactory test = new ReadRecordFactory(rf, input,
new Configuration());
GridmixKey key = new GridmixKey(GridmixKey.DATA, 100, 2);
GridmixRecord val = new GridmixRecord(200, 2);
while (test.next(key, val)) {
}
// should have read 10 * (GridmixKey size + GridmixRecord size) = 10 * (100 + 200) = 3000 bytes
assertEquals(3000, input.getCounter());
// progress should be -1 because all records have been read
assertEquals(-1, rf.getProgress(), 0.01);
test.close();
}
private class FakeRecordFactory extends RecordFactory {
private int counter = 10;
@Override
public void close() throws IOException {
}
@Override
public boolean next(GridmixKey key, GridmixRecord val) throws IOException {
counter--;
return counter >= 0;
}
@Override
public float getProgress() throws IOException {
return counter;
}
}
private class FakeInputStream extends InputStream implements Seekable,
PositionedReadable {
private long counter;
@Override
public int read() throws IOException {
return 0;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
int realLen = len - off;
counter += realLen;
for (int i = 0; i < b.length; i++) {
b[i] = 0;
}
return realLen;
}
public long getCounter() {
return counter;
}
@Override
public void seek(long pos) throws IOException {
}
@Override
public long getPos() throws IOException {
return counter;
}
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
return false;
}
@Override
public int read(long position, byte[] buffer, int offset, int length)
throws IOException {
return 0;
}
@Override
public void readFully(long position, byte[] buffer, int offset, int length)
throws IOException {
}
@Override
public void readFully(long position, byte[] buffer) throws IOException {
}
}
private class FakeFSDataInputStream extends FSDataInputStream {
public FakeFSDataInputStream(InputStream in) throws IOException {
super(in);
}
}
/*
* test LoadRecordReader. This class reads data from a set of files.
*/
@Test (timeout=3000)
public void testLoadJobLoadRecordReader() throws Exception {
LoadJob.LoadRecordReader test = new LoadJob.LoadRecordReader();
Configuration conf = new Configuration();
FileSystem fs1 = mock(FileSystem.class);
when(fs1.open((Path) anyObject())).thenReturn(
new FakeFSDataInputStream(new FakeInputStream()));
Path p1 = mock(Path.class);
when(p1.getFileSystem((JobConf) anyObject())).thenReturn(fs1);
FileSystem fs2 = mock(FileSystem.class);
when(fs2.open((Path) anyObject())).thenReturn(
new FakeFSDataInputStream(new FakeInputStream()));
Path p2 = mock(Path.class);
when(p2.getFileSystem((JobConf) anyObject())).thenReturn(fs2);
Path[] paths = {p1, p2};
long[] start = {0, 0};
long[] lengths = {1000, 1000};
String[] locations = {"temp1", "temp2"};
CombineFileSplit cfsplit = new CombineFileSplit(paths, start, lengths,
locations);
double[] reduceBytes = {100, 100};
double[] reduceRecords = {2, 2};
long[] reduceOutputBytes = {500, 500};
long[] reduceOutputRecords = {2, 2};
ResourceUsageMetrics metrics = new ResourceUsageMetrics();
ResourceUsageMetrics[] rMetrics = {new ResourceUsageMetrics(),
new ResourceUsageMetrics()};
LoadSplit input = new LoadSplit(cfsplit, 2, 3, 1500L, 2L, 3000L, 2L,
reduceBytes, reduceRecords, reduceOutputBytes, reduceOutputRecords,
metrics, rMetrics);
TaskAttemptID taskId = new TaskAttemptID();
TaskAttemptContext ctx = new TaskAttemptContextImpl(conf, taskId);
test.initialize(input, ctx);
GridmixRecord gr = test.getCurrentValue();
int counter = 0;
while (test.nextKeyValue()) {
gr = test.getCurrentValue();
if (counter == 0) {
// read first file
assertEquals(0.5, test.getProgress(), 0.001);
} else if (counter == 1) {
// read second file
assertEquals(1.0, test.getProgress(), 0.001);
}
//
assertEquals(1000, gr.getSize());
counter++;
}
assertEquals(1000, gr.getSize());
// Two files have been read
assertEquals(2, counter);
test.close();
}
/*
* test LoadReducer
*/
@Test (timeout=3000)
public void testLoadJobLoadReducer() throws Exception {
LoadJob.LoadReducer test = new LoadJob.LoadReducer();
Configuration conf = new Configuration();
conf.setInt(JobContext.NUM_REDUCES, 2);
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
conf.setBoolean(FileOutputFormat.COMPRESS, true);
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
TaskAttemptID taskid = new TaskAttemptID();
RawKeyValueIterator input = new FakeRawKeyValueIterator();
Counter counter = new GenericCounter();
Counter inputValueCounter = new GenericCounter();
LoadRecordWriter output = new LoadRecordWriter();
OutputCommitter committer = new CustomOutputCommitter();
StatusReporter reporter = new DummyReporter();
RawComparator<GridmixKey> comparator = new FakeRawComparator();
ReduceContext<GridmixKey, GridmixRecord, NullWritable, GridmixRecord> reduceContext = new ReduceContextImpl<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>(
conf, taskid, input, counter, inputValueCounter, output, committer,
reporter, comparator, GridmixKey.class, GridmixRecord.class);
// read one key/value pair up front
reduceContext.nextKeyValue();
org.apache.hadoop.mapreduce.Reducer<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>.Context context = new WrappedReducer<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>()
.getReducerContext(reduceContext);
// test.setup(context);
test.run(context);
// 9 records have been read (-1 for the record consumed above)
assertEquals(9, counter.getValue());
assertEquals(10, inputValueCounter.getValue());
assertEquals(1, output.getData().size());
GridmixRecord record = output.getData().values().iterator()
.next();
assertEquals(1593, record.getSize());
}
protected class FakeRawKeyValueIterator implements RawKeyValueIterator {
int counter = 10;
@Override
public DataInputBuffer getKey() throws IOException {
ByteArrayOutputStream dt = new ByteArrayOutputStream();
GridmixKey key = new GridmixKey(GridmixKey.REDUCE_SPEC, 10 * counter, 1L);
Spec spec = new Spec();
spec.rec_in = counter;
spec.rec_out = counter;
spec.bytes_out = counter * 100;
key.setSpec(spec);
key.write(new DataOutputStream(dt));
DataInputBuffer result = new DataInputBuffer();
byte[] b = dt.toByteArray();
result.reset(b, 0, b.length);
return result;
}
@Override
public DataInputBuffer getValue() throws IOException {
ByteArrayOutputStream dt = new ByteArrayOutputStream();
GridmixRecord key = new GridmixRecord(100, 1);
key.write(new DataOutputStream(dt));
DataInputBuffer result = new DataInputBuffer();
byte[] b = dt.toByteArray();
result.reset(b, 0, b.length);
return result;
}
@Override
public boolean next() throws IOException {
counter--;
return counter >= 0;
}
@Override
public void close() throws IOException {
}
@Override
public Progress getProgress() {
return null;
}
}
private class FakeRawComparator implements RawComparator<GridmixKey> {
@Override
public int compare(GridmixKey o1, GridmixKey o2) {
return o1.compareTo(o2);
}
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
if ((l1 - s1) != (l2 - s2)) {
return (l1 - s1) - (l2 - s2);
}
int len = l1 - s1;
for (int i = 0; i < len; i++) {
if (b1[s1 + i] != b2[s2 + i]) {
return b1[s1 + i] - b2[s2 + i];
}
}
return 0;
}
}
/*
* test SerialJobFactory
*/
@Test (timeout=120000)
public void testSerialReaderThread() throws Exception {
Configuration conf = new Configuration();
File fin = new File("src" + File.separator + "test" + File.separator
+ "resources" + File.separator + "data" + File.separator
+ "wordcount2.json");
// read a couple of jobs from wordcount2.json
JobStoryProducer jobProducer = new ZombieJobProducer(new Path(
fin.getAbsolutePath()), null, conf);
CountDownLatch startFlag = new CountDownLatch(1);
UserResolver resolver = new SubmitterUserResolver();
FakeJobSubmitter submitter = new FakeJobSubmitter();
File ws = new File("target" + File.separator + this.getClass().getName());
if (!ws.exists()) {
Assert.assertTrue(ws.mkdirs());
}
SerialJobFactory jobFactory = new SerialJobFactory(submitter, jobProducer,
new Path(ws.getAbsolutePath()), conf, startFlag, resolver);
Path ioPath = new Path(ws.getAbsolutePath());
jobFactory.setDistCacheEmulator(new DistributedCacheEmulator(conf, ioPath));
Thread test = jobFactory.createReaderThread();
test.start();
Thread.sleep(1000);
// SerialReaderThread waits for startFlag
assertEquals(0, submitter.getJobs().size());
// start!
startFlag.countDown();
while (test.isAlive()) {
Thread.sleep(1000);
jobFactory.update(null);
}
// submitter was called twice
assertEquals(2, submitter.getJobs().size());
}
private class FakeJobSubmitter extends JobSubmitter {
// records every submitted job (used to count them)
private List<GridmixJob> jobs = new ArrayList<GridmixJob>();
public FakeJobSubmitter() {
super(null, 1, 1, null, null);
}
@Override
public void add(GridmixJob job) throws InterruptedException {
jobs.add(job);
}
public List<GridmixJob> getJobs() {
return jobs;
}
}
/*
* test SleepMapper
*/
@SuppressWarnings({"unchecked", "rawtypes"})
@Test (timeout=30000)
public void testSleepMapper() throws Exception {
SleepJob.SleepMapper test = new SleepJob.SleepMapper();
Configuration conf = new Configuration();
conf.setInt(JobContext.NUM_REDUCES, 2);
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
TaskAttemptID taskId = new TaskAttemptID();
FakeRecordLLReader reader = new FakeRecordLLReader();
LoadRecordGkNullWriter writer = new LoadRecordGkNullWriter();
OutputCommitter committer = new CustomOutputCommitter();
StatusReporter reporter = new TaskAttemptContextImpl.DummyReporter();
SleepSplit split = getSleepSplit();
MapContext<LongWritable, LongWritable, GridmixKey, NullWritable> mapcontext = new MapContextImpl<LongWritable, LongWritable, GridmixKey, NullWritable>(
conf, taskId, reader, writer, committer, reporter, split);
Context context = new WrappedMapper<LongWritable, LongWritable, GridmixKey, NullWritable>()
.getMapContext(mapcontext);
long start = System.currentTimeMillis();
LOG.info("start:" + start);
LongWritable key = new LongWritable(start + 2000);
LongWritable value = new LongWritable(start + 2000);
// should sleep for about 2 seconds
test.map(key, value, context);
LOG.info("finish:" + System.currentTimeMillis());
assertTrue(System.currentTimeMillis() >= (start + 2000));
test.cleanup(context);
assertEquals(1, writer.getData().size());
}
private SleepSplit getSleepSplit() throws Exception {
String[] locations = {"locOne", "loctwo"};
long[] reduceDurations = {101L, 102L};
return new SleepSplit(0, 2000L, reduceDurations, 2, locations);
}
/*
* test SleepReducer
*/
@Test (timeout=3000)
public void testSleepReducer() throws Exception {
Configuration conf = new Configuration();
conf.setInt(JobContext.NUM_REDUCES, 2);
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
conf.setBoolean(FileOutputFormat.COMPRESS, true);
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
TaskAttemptID taskId = new TaskAttemptID();
RawKeyValueIterator input = new FakeRawKeyValueReducerIterator();
Counter counter = new GenericCounter();
Counter inputValueCounter = new GenericCounter();
RecordWriter<NullWritable, NullWritable> output = new LoadRecordReduceWriter();
OutputCommitter committer = new CustomOutputCommitter();
StatusReporter reporter = new DummyReporter();
RawComparator<GridmixKey> comparator = new FakeRawComparator();
ReduceContext<GridmixKey, NullWritable, NullWritable, NullWritable> reducecontext = new ReduceContextImpl<GridmixKey, NullWritable, NullWritable, NullWritable>(
conf, taskId, input, counter, inputValueCounter, output, committer,
reporter, comparator, GridmixKey.class, NullWritable.class);
org.apache.hadoop.mapreduce.Reducer<GridmixKey, NullWritable, NullWritable, NullWritable>.Context context = new WrappedReducer<GridmixKey, NullWritable, NullWritable, NullWritable>()
.getReducerContext(reducecontext);
SleepReducer test = new SleepReducer();
long start = System.currentTimeMillis();
test.setup(context);
long sleeper = context.getCurrentKey().getReduceOutputBytes();
// status has been changed
assertEquals("Sleeping... " + sleeper + " ms left", context.getStatus());
// should sleep 0.9 sec
assertTrue(System.currentTimeMillis() >= (start + sleeper));
test.cleanup(context);
// status has been changed again
assertEquals("Slept for " + sleeper, context.getStatus());
}
private class LoadRecordReduceWriter extends
RecordWriter<NullWritable, NullWritable> {
@Override
public void write(NullWritable key, NullWritable value) throws IOException,
InterruptedException {
}
@Override
public void close(TaskAttemptContext context) throws IOException,
InterruptedException {
}
}
protected class FakeRawKeyValueReducerIterator implements RawKeyValueIterator {
int counter = 10;
@Override
public DataInputBuffer getKey() throws IOException {
ByteArrayOutputStream dt = new ByteArrayOutputStream();
GridmixKey key = new GridmixKey(GridmixKey.REDUCE_SPEC, 10 * counter, 1L);
Spec spec = new Spec();
spec.rec_in = counter;
spec.rec_out = counter;
spec.bytes_out = counter * 100;
key.setSpec(spec);
key.write(new DataOutputStream(dt));
DataInputBuffer result = new DataInputBuffer();
byte[] b = dt.toByteArray();
result.reset(b, 0, b.length);
return result;
}
@Override
public DataInputBuffer getValue() throws IOException {
ByteArrayOutputStream dt = new ByteArrayOutputStream();
NullWritable key = NullWritable.get();
key.write(new DataOutputStream(dt));
DataInputBuffer result = new DataInputBuffer();
byte[] b = dt.toByteArray();
result.reset(b, 0, b.length);
return result;
}
@Override
public boolean next() throws IOException {
counter--;
return counter >= 0;
}
@Override
public void close() throws IOException {
}
@Override
public Progress getProgress() {
return null;
}
}
}
| 32,469 | 31.79798 | 190 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestFilePool.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Random;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
public class TestFilePool {
static final Log LOG = LogFactory.getLog(TestFileQueue.class);
static final int NFILES = 26;
static final Path base = getBaseDir();
static Path getBaseDir() {
try {
final Configuration conf = new Configuration();
final FileSystem fs = FileSystem.getLocal(conf).getRaw();
return new Path(System.getProperty("test.build.data", "/tmp"),
"testFilePool").makeQualified(fs);
} catch (IOException e) {
fail();
}
return null;
}
@BeforeClass
public static void setup() throws IOException {
final Configuration conf = new Configuration();
final FileSystem fs = FileSystem.getLocal(conf).getRaw();
fs.delete(base, true);
final Random r = new Random();
final long seed = r.nextLong();
r.setSeed(seed);
LOG.info("seed: " + seed);
fs.mkdirs(base);
for (int i = 0; i < NFILES; ++i) {
Path file = base;
for (double d = 0.6; d > 0.0; d *= 0.8) {
if (r.nextDouble() < d) {
file = new Path(base, Integer.toString(r.nextInt(3)));
continue;
}
break;
}
OutputStream out = null;
try {
out = fs.create(new Path(file, "" + (char)('A' + i)));
final byte[] b = new byte[1024];
Arrays.fill(b, (byte)('A' + i));
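// file i holds ((i % 13) + 1) KB, so the 26 files cover sizes 1k..13k twice each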
for (int len = ((i % 13) + 1) * 1024; len > 0; len -= 1024) {
out.write(b);
}
} finally {
if (out != null) {
out.close();
}
}
}
}
@AfterClass
public static void cleanup() throws IOException {
final Configuration conf = new Configuration();
final FileSystem fs = FileSystem.getLocal(conf).getRaw();
fs.delete(base, true);
}
@Test
public void testUnsuitable() throws Exception {
try {
final Configuration conf = new Configuration();
// all files 13k or less
conf.setLong(FilePool.GRIDMIX_MIN_FILE, 14 * 1024);
final FilePool pool = new FilePool(conf, base);
pool.refresh();
} catch (IOException e) {
return;
}
fail();
}
@Test
public void testPool() throws Exception {
final Random r = new Random();
final Configuration conf = new Configuration();
conf.setLong(FilePool.GRIDMIX_MIN_FILE, 3 * 1024);
final FilePool pool = new FilePool(conf, base);
pool.refresh();
final ArrayList<FileStatus> files = new ArrayList<FileStatus>();
// ensure 1k, 2k files excluded
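// setup() wrote 2 * (1 + 2 + ... + 13) KB = (NFILES / 2) * (NFILES / 2 + 1) KB in total; dropping the 1k and 2k files removes 2 * (1 + 2) = 6 KB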
final int expectedPoolSize = (NFILES / 2 * (NFILES / 2 + 1) - 6) * 1024;
assertEquals(expectedPoolSize, pool.getInputFiles(Long.MAX_VALUE, files));
assertEquals(NFILES - 4, files.size());
// exact match
files.clear();
assertEquals(expectedPoolSize, pool.getInputFiles(expectedPoolSize, files));
// match random within 12k
files.clear();
final long rand = r.nextInt(expectedPoolSize);
assertTrue("Missed: " + rand,
(NFILES / 2) * 1024 > rand - pool.getInputFiles(rand, files));
// all files
conf.setLong(FilePool.GRIDMIX_MIN_FILE, 0);
pool.refresh();
files.clear();
assertEquals((NFILES / 2 * (NFILES / 2 + 1)) * 1024,
pool.getInputFiles(Long.MAX_VALUE, files));
}
void checkSplitEq(FileSystem fs, CombineFileSplit split, long bytes)
throws Exception {
long splitBytes = 0L;
HashSet<Path> uniq = new HashSet<Path>();
for (int i = 0; i < split.getNumPaths(); ++i) {
splitBytes += split.getLength(i);
assertTrue(
split.getLength(i) <= fs.getFileStatus(split.getPath(i)).getLen());
assertFalse(uniq.contains(split.getPath(i)));
uniq.add(split.getPath(i));
}
assertEquals(bytes, splitBytes);
}
@Test
public void testStriper() throws Exception {
final Random r = new Random();
final Configuration conf = new Configuration();
final FileSystem fs = FileSystem.getLocal(conf).getRaw();
conf.setLong(FilePool.GRIDMIX_MIN_FILE, 3 * 1024);
final FilePool pool = new FilePool(conf, base) {
@Override
public BlockLocation[] locationsFor(FileStatus stat, long start, long len)
throws IOException {
return new BlockLocation[] { new BlockLocation() };
}
};
pool.refresh();
final int expectedPoolSize = (NFILES / 2 * (NFILES / 2 + 1) - 6) * 1024;
final InputStriper striper = new InputStriper(pool, expectedPoolSize);
int last = 0;
for (int i = 0; i < expectedPoolSize;
last = Math.min(expectedPoolSize - i, r.nextInt(expectedPoolSize))) {
checkSplitEq(fs, striper.splitFor(pool, last, 0), last);
i += last;
}
final InputStriper striper2 = new InputStriper(pool, expectedPoolSize);
checkSplitEq(fs, striper2.splitFor(pool, expectedPoolSize, 0),
expectedPoolSize);
}
}
| 6,256 | 31.931579 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixStatistics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.util.concurrent.CountDownLatch;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.gridmix.Statistics.ClusterStats;
import org.apache.hadoop.mapred.gridmix.Statistics.JobStats;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.tools.rumen.JobStory;
import org.apache.hadoop.tools.rumen.TaskAttemptInfo;
import org.apache.hadoop.tools.rumen.TaskInfo;
import org.apache.hadoop.tools.rumen.Pre21JobHistoryConstants.Values;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* Test Gridmix's {@link Statistics} class.
*/
public class TestGridmixStatistics {
/**
* Test {@link Statistics.JobStats}.
*/
@Test
@SuppressWarnings("deprecation")
public void testJobStats() throws Exception {
Job job = new Job() {};
JobStats stats = new JobStats(1, 2, job);
assertEquals("Incorrect num-maps", 1, stats.getNoOfMaps());
assertEquals("Incorrect num-reds", 2, stats.getNoOfReds());
assertTrue("Incorrect job", job == stats.getJob());
assertNull("Unexpected job status", stats.getJobStatus());
// add a new status
JobStatus status = new JobStatus();
stats.updateJobStatus(status);
assertNotNull("Missing job status", stats.getJobStatus());
assertTrue("Incorrect job status", status == stats.getJobStatus());
}
private static JobStory getCustomJobStory(final int numMaps,
final int numReds) {
return new JobStory() {
@Override
public InputSplit[] getInputSplits() {
return null;
}
@Override
public JobConf getJobConf() {
return null;
}
@Override
public JobID getJobID() {
return null;
}
@Override
public TaskAttemptInfo getMapTaskAttemptInfoAdjusted(int arg0, int arg1,
int arg2) {
return null;
}
@Override
public String getName() {
return null;
}
@Override
public int getNumberMaps() {
return numMaps;
}
@Override
public int getNumberReduces() {
return numReds;
}
@Override
public Values getOutcome() {
return null;
}
@Override
public String getQueueName() {
return null;
}
@Override
public long getSubmissionTime() {
return 0;
}
@Override
public TaskAttemptInfo getTaskAttemptInfo(TaskType arg0, int arg1,
int arg2) {
return null;
}
@Override
public TaskInfo getTaskInfo(TaskType arg0, int arg1) {
return null;
}
@Override
public String getUser() {
return null;
}
};
}
/**
* Test {@link Statistics}.
*/
@Test
@SuppressWarnings("deprecation")
public void testStatistics() throws Exception {
// test job stats generation
Configuration conf = new Configuration();
// test dummy jobs like data-generation etc
Job job = new Job(conf) {
};
JobStats stats = Statistics.generateJobStats(job, null);
testJobStats(stats, -1, -1, null, job);
// add a job desc with 2 map and 1 reduce task
conf.setInt(GridmixJob.GRIDMIX_JOB_SEQ, 1);
// test dummy jobs like data-generation etc
job = new Job(conf) {
};
JobStory zjob = getCustomJobStory(2, 1);
stats = Statistics.generateJobStats(job, zjob);
testJobStats(stats, 2, 1, null, job);
// add a job status
JobStatus jStatus = new JobStatus();
stats.updateJobStatus(jStatus);
testJobStats(stats, 2, 1, jStatus, job);
// start the statistics
CountDownLatch startFlag = new CountDownLatch(1); // prevents the collector
// thread from starting
Statistics statistics = new Statistics(new JobConf(), 0, startFlag);
statistics.start();
testClusterStats(0, 0, 0);
// add to the statistics object
statistics.addJobStats(stats);
testClusterStats(2, 1, 1);
// add another job
JobStory zjob2 = getCustomJobStory(10, 5);
conf.setInt(GridmixJob.GRIDMIX_JOB_SEQ, 2);
job = new Job(conf) {
};
JobStats stats2 = Statistics.generateJobStats(job, zjob2);
statistics.addJobStats(stats2);
testClusterStats(12, 6, 2);
// finish off one job
statistics.add(stats2);
testClusterStats(2, 1, 1);
// finish off the other job
statistics.add(stats);
testClusterStats(0, 0, 0);
statistics.shutdown();
}
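// Illustrative sketch (not part of the original test): the cluster-level
// counters asserted above behave like a running tally. Submitting a job adds
// its map/reduce counts and reporting the same job as finished removes them
// again. The {maps, reduces, jobs} array layout is only for this sketch.
private static int[] tallyAfter(int[] totals, int numMaps, int numReds,
    boolean finished) {
  final int sign = finished ? -1 : 1;   // add on submit, subtract on finish
  return new int[] { totals[0] + sign * numMaps,
      totals[1] + sign * numReds,
      totals[2] + sign };
}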
// test the job stats
private static void testJobStats(JobStats stats, int numMaps, int numReds,
JobStatus jStatus, Job job) {
assertEquals("Incorrect num map tasks", numMaps, stats.getNoOfMaps());
assertEquals("Incorrect num reduce tasks", numReds, stats.getNoOfReds());
if (job != null) {
assertNotNull("Missing job", job);
}
// check running job
assertTrue("Incorrect job", job == stats.getJob());
if (jStatus != null) {
assertNotNull("Missing job status", jStatus);
}
// check job stats
assertTrue("Incorrect job status", jStatus == stats.getJobStatus());
}
// test the cluster stats
private static void testClusterStats(int numSubmittedMapTasks,
int numSubmittedReduceTasks,
int numSubmittedJobs) {
assertEquals("Incorrect count of total number of submitted map tasks",
numSubmittedMapTasks, ClusterStats.getSubmittedMapTasks());
assertEquals("Incorrect count of total number of submitted reduce tasks",
numSubmittedReduceTasks,
ClusterStats.getSubmittedReduceTasks());
assertEquals("Incorrect submitted jobs",
numSubmittedJobs, ClusterStats.getRunningJobStats().size());
}
}
| 7,065 | 31.412844 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DebugJobProducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.hadoop.mapred.TaskStatus.State;
import org.apache.hadoop.tools.rumen.JobStoryProducer;
import org.apache.hadoop.tools.rumen.JobStory;
import org.apache.hadoop.tools.rumen.MapTaskAttemptInfo;
import org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo;
import org.apache.hadoop.tools.rumen.TaskInfo;
import org.apache.hadoop.tools.rumen.TaskAttemptInfo;
import org.apache.hadoop.tools.rumen.Pre21JobHistoryConstants.Values;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.InputSplit;
import java.util.ArrayList;
import java.util.Random;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.TimeUnit;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
public class DebugJobProducer implements JobStoryProducer {
public static final Log LOG = LogFactory.getLog(DebugJobProducer.class);
final ArrayList<JobStory> submitted;
private final Configuration conf;
private final AtomicInteger numJobs;
public DebugJobProducer(int numJobs, Configuration conf) {
super();
MockJob.reset();
this.conf = conf;
this.numJobs = new AtomicInteger(numJobs);
this.submitted = new ArrayList<JobStory>();
}
@Override
public JobStory getNextJob() throws IOException {
if (numJobs.getAndDecrement() > 0) {
final MockJob ret = new MockJob(conf);
submitted.add(ret);
return ret;
}
return null;
}
@Override
public void close() {
}
static double[] getDistr(Random r, double mindist, int size) {
assert 0.0 <= mindist && mindist <= 1.0;
final double min = mindist / size;
final double rem = 1.0 - min * size;
final double[] tmp = new double[size];
for (int i = 0; i < tmp.length - 1; ++i) {
tmp[i] = r.nextDouble() * rem;
}
tmp[tmp.length - 1] = rem;
Arrays.sort(tmp);
final double[] ret = new double[size];
ret[0] = tmp[0] + min;
for (int i = 1; i < size; ++i) {
ret[i] = tmp[i] - tmp[i - 1] + min;
}
return ret;
}
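// Illustrative sketch (not part of the original class): the invariant
// getDistr() provides is that every returned share is at least
// mindist / size and that the shares sum to 1.0, so callers can treat the
// array as a complete partition of the requested total.
static boolean isValidDistr(double[] dist, double mindist) {
  double sum = 0.0;
  for (double d : dist) {
    if (d + 1e-9 < mindist / dist.length) {   // each share gets the floor
      return false;
    }
    sum += d;
  }
  return Math.abs(sum - 1.0) < 1e-6;          // shares partition the whole
}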
/**
* Generate random task data for a synthetic job.
*/
static class MockJob implements JobStory {
static final int MIN_REC = 1 << 14;
static final int MIN_BYTES = 1 << 20;
static final int VAR_REC = 1 << 14;
static final int VAR_BYTES = 4 << 20;
static final int MAX_MAP = 5;
static final int MAX_RED = 3;
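// With the constants above, a synthetic job's total map input falls in
// [MIN_REC, MIN_REC + VAR_REC) records and [MIN_BYTES, MIN_BYTES + VAR_BYTES)
// bytes, split across 1..MAX_MAP map buckets; the shuffle and reduce totals
// are drawn from the same ranges and spread over 1..MAX_RED reduce buckets
// (see the constructor below).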
final Configuration conf;
static void initDist(
Random r, double min, int[] recs, long[] bytes, long tot_recs,
long tot_bytes) {
final double[] recs_dist = getDistr(r, min, recs.length);
final double[] bytes_dist = getDistr(r, min, recs.length);
long totalbytes = 0L;
int totalrecs = 0;
for (int i = 0; i < recs.length; ++i) {
recs[i] = (int) Math.round(tot_recs * recs_dist[i]);
bytes[i] = Math.round(tot_bytes * bytes_dist[i]);
totalrecs += recs[i];
totalbytes += bytes[i];
}
// Add/remove excess
recs[0] += totalrecs - tot_recs;
bytes[0] += totalbytes - tot_bytes;
LOG.info(
"DIST: " + Arrays.toString(recs) + " " + tot_recs + "/" + totalrecs +
" " + Arrays.toString(bytes) + " " + tot_bytes + "/" + totalbytes);
}
private static final AtomicInteger seq = new AtomicInteger(0);
// set timestamp in the past
private static final AtomicLong timestamp = new AtomicLong(
System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(
60, TimeUnit.DAYS));
private final int id;
private final String name;
private final int[] m_recsIn, m_recsOut, r_recsIn, r_recsOut;
private final long[] m_bytesIn, m_bytesOut, r_bytesIn, r_bytesOut;
private final long submitTime;
public MockJob(Configuration conf) {
final Random r = new Random();
final long seed = r.nextLong();
r.setSeed(seed);
id = seq.getAndIncrement();
name = String.format("MOCKJOB%06d", id);
this.conf = conf;
LOG.info(name + " (" + seed + ")");
submitTime = timestamp.addAndGet(
TimeUnit.MILLISECONDS.convert(
r.nextInt(10), TimeUnit.SECONDS));
m_recsIn = new int[r.nextInt(MAX_MAP) + 1];
m_bytesIn = new long[m_recsIn.length];
m_recsOut = new int[m_recsIn.length];
m_bytesOut = new long[m_recsIn.length];
r_recsIn = new int[r.nextInt(MAX_RED) + 1];
r_bytesIn = new long[r_recsIn.length];
r_recsOut = new int[r_recsIn.length];
r_bytesOut = new long[r_recsIn.length];
// map input
final long map_recs = r.nextInt(VAR_REC) + MIN_REC;
final long map_bytes = r.nextInt(VAR_BYTES) + MIN_BYTES;
initDist(r, 0.5, m_recsIn, m_bytesIn, map_recs, map_bytes);
// shuffle
final long shuffle_recs = r.nextInt(VAR_REC) + MIN_REC;
final long shuffle_bytes = r.nextInt(VAR_BYTES) + MIN_BYTES;
initDist(r, 0.5, m_recsOut, m_bytesOut, shuffle_recs, shuffle_bytes);
initDist(r, 0.8, r_recsIn, r_bytesIn, shuffle_recs, shuffle_bytes);
// reduce output
final long red_recs = r.nextInt(VAR_REC) + MIN_REC;
final long red_bytes = r.nextInt(VAR_BYTES) + MIN_BYTES;
initDist(r, 0.5, r_recsOut, r_bytesOut, red_recs, red_bytes);
if (LOG.isDebugEnabled()) {
int iMapBTotal = 0, oMapBTotal = 0, iRedBTotal = 0, oRedBTotal = 0;
int iMapRTotal = 0, oMapRTotal = 0, iRedRTotal = 0, oRedRTotal = 0;
for (int i = 0; i < m_recsIn.length; ++i) {
iMapRTotal += m_recsIn[i];
iMapBTotal += m_bytesIn[i];
oMapRTotal += m_recsOut[i];
oMapBTotal += m_bytesOut[i];
}
for (int i = 0; i < r_recsIn.length; ++i) {
iRedRTotal += r_recsIn[i];
iRedBTotal += r_bytesIn[i];
oRedRTotal += r_recsOut[i];
oRedBTotal += r_bytesOut[i];
}
LOG.debug(
String.format(
"%s: M (%03d) %6d/%10d -> %6d/%10d" +
" R (%03d) %6d/%10d -> %6d/%10d @%d", name, m_bytesIn.length,
iMapRTotal, iMapBTotal, oMapRTotal, oMapBTotal, r_bytesIn.length,
iRedRTotal, iRedBTotal, oRedRTotal, oRedBTotal, submitTime));
}
}
@Override
public String getName() {
return name;
}
@Override
public String getUser() {
// Obtain user name from job configuration, if available.
// Otherwise use dummy user names.
String user = conf.get(MRJobConfig.USER_NAME);
if (user == null) {
user = String.format("foobar%d", id);
}
GridmixTestUtils.createHomeAndStagingDirectory(user, conf);
return user;
}
@Override
public JobID getJobID() {
return new JobID("job_mock_" + name, id);
}
@Override
public Values getOutcome() {
return Values.SUCCESS;
}
@Override
public long getSubmissionTime() {
return submitTime;
}
@Override
public int getNumberMaps() {
return m_bytesIn.length;
}
@Override
public int getNumberReduces() {
return r_bytesIn.length;
}
@Override
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
switch (taskType) {
case MAP:
return new TaskInfo(m_bytesIn[taskNumber], m_recsIn[taskNumber],
m_bytesOut[taskNumber], m_recsOut[taskNumber], -1);
case REDUCE:
return new TaskInfo(r_bytesIn[taskNumber], r_recsIn[taskNumber],
r_bytesOut[taskNumber], r_recsOut[taskNumber], -1);
default:
throw new IllegalArgumentException("Not interested");
}
}
@Override
public InputSplit[] getInputSplits() {
throw new UnsupportedOperationException();
}
@SuppressWarnings({ "deprecation", "incomplete-switch" })
@Override
public TaskAttemptInfo getTaskAttemptInfo(
TaskType taskType, int taskNumber, int taskAttemptNumber) {
switch (taskType) {
case MAP:
return new MapTaskAttemptInfo(
State.SUCCEEDED,
new TaskInfo(
m_bytesIn[taskNumber], m_recsIn[taskNumber],
m_bytesOut[taskNumber], m_recsOut[taskNumber], -1),
100);
case REDUCE:
return new ReduceTaskAttemptInfo(
State.SUCCEEDED,
new TaskInfo(
r_bytesIn[taskNumber], r_recsIn[taskNumber],
r_bytesOut[taskNumber], r_recsOut[taskNumber], -1),
100, 100, 100);
}
throw new UnsupportedOperationException();
}
@Override
public TaskAttemptInfo getMapTaskAttemptInfoAdjusted(
int taskNumber, int taskAttemptNumber, int locality) {
throw new UnsupportedOperationException();
}
@Override
public org.apache.hadoop.mapred.JobConf getJobConf() {
return new JobConf(conf);
}
@Override
public String getQueueName() {
String qName = "default";
return qName;
}
public static void reset() {
seq.set(0);
timestamp.set(System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(
60, TimeUnit.DAYS));
}
}
}
| 10,209 | 31.619808 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DummyResourceCalculatorPlugin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
/**
* Plugin class to test resource information reported by NM. Use configuration
* items {@link #MAXVMEM_TESTING_PROPERTY} and {@link #MAXPMEM_TESTING_PROPERTY}
* to tell NM the total vmem and the total pmem. Use configuration items
* {@link #NUM_PROCESSORS}, {@link #CPU_FREQUENCY}, {@link #CUMULATIVE_CPU_TIME}
* and {@link #CPU_USAGE} to tell NM the CPU information.
*/
@InterfaceAudience.Private
public class DummyResourceCalculatorPlugin extends ResourceCalculatorPlugin {
/** max vmem on the TT */
public static final String MAXVMEM_TESTING_PROPERTY =
"mapred.tasktracker.maxvmem.testing";
/** max pmem on the TT */
public static final String MAXPMEM_TESTING_PROPERTY =
"mapred.tasktracker.maxpmem.testing";
/** number of processors for testing */
public static final String NUM_PROCESSORS =
"mapred.tasktracker.numprocessors.testing";
/** CPU frequency for testing */
public static final String CPU_FREQUENCY =
"mapred.tasktracker.cpufrequency.testing";
/** cumulative CPU usage time for testing */
public static final String CUMULATIVE_CPU_TIME =
"mapred.tasktracker.cumulativecputime.testing";
/** CPU usage percentage for testing */
public static final String CPU_USAGE = "mapred.tasktracker.cpuusage.testing";
/** cumulative number of bytes read over the network */
public static final String NETWORK_BYTES_READ =
"mapred.tasktracker.networkread.testing";
/** cumulative number of bytes written over the network */
public static final String NETWORK_BYTES_WRITTEN =
"mapred.tasktracker.networkwritten.testing";
/** cumulative number of bytes read from disks */
public static final String STORAGE_BYTES_READ =
"mapred.tasktracker.storageread.testing";
/** cumulative number of bytes written to disks */
public static final String STORAGE_BYTES_WRITTEN =
"mapred.tasktracker.storagewritten.testing";
/** process cumulative CPU usage time for testing */
public static final String PROC_CUMULATIVE_CPU_TIME =
"mapred.tasktracker.proccumulativecputime.testing";
/** process pmem for testing */
public static final String PROC_PMEM_TESTING_PROPERTY =
"mapred.tasktracker.procpmem.testing";
/** process vmem for testing */
public static final String PROC_VMEM_TESTING_PROPERTY =
"mapred.tasktracker.procvmem.testing";
/** {@inheritDoc} */
@Override
public long getVirtualMemorySize() {
return getConf().getLong(MAXVMEM_TESTING_PROPERTY, -1);
}
/** {@inheritDoc} */
@Override
public long getPhysicalMemorySize() {
return getConf().getLong(MAXPMEM_TESTING_PROPERTY, -1);
}
/** {@inheritDoc} */
@Override
public long getAvailableVirtualMemorySize() {
return getConf().getLong(MAXVMEM_TESTING_PROPERTY, -1);
}
/** {@inheritDoc} */
@Override
public long getAvailablePhysicalMemorySize() {
return getConf().getLong(MAXPMEM_TESTING_PROPERTY, -1);
}
/** {@inheritDoc} */
@Override
public int getNumProcessors() {
return getConf().getInt(NUM_PROCESSORS, -1);
}
/** {@inheritDoc} */
@Override
public int getNumCores() {
return getNumProcessors();
}
/** {@inheritDoc} */
@Override
public long getCpuFrequency() {
return getConf().getLong(CPU_FREQUENCY, -1);
}
/** {@inheritDoc} */
@Override
public long getCumulativeCpuTime() {
return getConf().getLong(CUMULATIVE_CPU_TIME, -1);
}
/** {@inheritDoc} */
@Override
public float getCpuUsage() {
return getConf().getFloat(CPU_USAGE, -1);
}
/** {@inheritDoc} */
@Override
public long getNetworkBytesRead() {
return getConf().getLong(NETWORK_BYTES_READ, -1);
}
/** {@inheritDoc} */
@Override
public long getNetworkBytesWritten() {
return getConf().getLong(NETWORK_BYTES_WRITTEN, -1);
}
/** {@inheritDoc} */
@Override
public long getStorageBytesRead() {
return getConf().getLong(STORAGE_BYTES_READ, -1);
}
/** {@inheritDoc} */
@Override
public long getStorageBytesWritten() {
return getConf().getLong(STORAGE_BYTES_WRITTEN, -1);
}
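// Illustrative sketch (not part of the original plugin): how a test might
// wire dummy values into an instance so that the getters above report them
// back. The property names are the constants defined above; the numeric
// values are arbitrary examples.
static DummyResourceCalculatorPlugin exampleConfiguredPlugin() {
  org.apache.hadoop.conf.Configuration conf =
      new org.apache.hadoop.conf.Configuration();
  conf.setLong(MAXVMEM_TESTING_PROPERTY, 4L * 1024 * 1024 * 1024);   // 4 GB
  conf.setLong(MAXPMEM_TESTING_PROPERTY, 2L * 1024 * 1024 * 1024);   // 2 GB
  conf.setInt(NUM_PROCESSORS, 8);
  conf.setLong(CUMULATIVE_CPU_TIME, 10000L);
  conf.setFloat(CPU_USAGE, 15.5f);
  DummyResourceCalculatorPlugin plugin = new DummyResourceCalculatorPlugin();
  plugin.setConf(conf);   // the getters read these values via getConf()
  return plugin;
}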
}
| 5,041 | 32.390728 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixRecord.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import java.util.Arrays;
import java.util.Random;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
public class TestGridmixRecord {
private static final Log LOG = LogFactory.getLog(TestGridmixRecord.class);
static void lengthTest(GridmixRecord x, GridmixRecord y, int min,
int max) throws Exception {
final Random r = new Random();
final long seed = r.nextLong();
r.setSeed(seed);
LOG.info("length: " + seed);
final DataInputBuffer in = new DataInputBuffer();
final DataOutputBuffer out1 = new DataOutputBuffer();
final DataOutputBuffer out2 = new DataOutputBuffer();
for (int i = min; i < max; ++i) {
setSerialize(x, r.nextLong(), i, out1);
// check write
assertEquals(i, out1.getLength());
// write to stream
x.write(out2);
// check read
in.reset(out1.getData(), 0, out1.getLength());
y.readFields(in);
assertEquals(i, x.getSize());
assertEquals(i, y.getSize());
}
// check stream read
in.reset(out2.getData(), 0, out2.getLength());
for (int i = min; i < max; ++i) {
y.readFields(in);
assertEquals(i, y.getSize());
}
}
static void randomReplayTest(GridmixRecord x, GridmixRecord y, int min,
int max) throws Exception {
final Random r = new Random();
final long seed = r.nextLong();
r.setSeed(seed);
LOG.info("randReplay: " + seed);
final DataOutputBuffer out1 = new DataOutputBuffer();
for (int i = min; i < max; ++i) {
final int s = out1.getLength();
x.setSeed(r.nextLong());
x.setSize(i);
x.write(out1);
assertEquals(i, out1.getLength() - s);
}
final DataInputBuffer in = new DataInputBuffer();
in.reset(out1.getData(), 0, out1.getLength());
final DataOutputBuffer out2 = new DataOutputBuffer();
// deserialize written records, write to separate buffer
for (int i = min; i < max; ++i) {
final int s = in.getPosition();
y.readFields(in);
assertEquals(i, in.getPosition() - s);
y.write(out2);
}
// verify written contents match
assertEquals(out1.getLength(), out2.getLength());
// assumes that writes will grow buffer deterministically
assertEquals("Bad test", out1.getData().length, out2.getData().length);
assertArrayEquals(out1.getData(), out2.getData());
}
static void eqSeedTest(GridmixRecord x, GridmixRecord y, int max)
throws Exception {
final Random r = new Random();
final long s = r.nextLong();
r.setSeed(s);
LOG.info("eqSeed: " + s);
assertEquals(x.fixedBytes(), y.fixedBytes());
final int min = x.fixedBytes() + 1;
final DataOutputBuffer out1 = new DataOutputBuffer();
final DataOutputBuffer out2 = new DataOutputBuffer();
for (int i = min; i < max; ++i) {
final long seed = r.nextLong();
setSerialize(x, seed, i, out1);
setSerialize(y, seed, i, out2);
assertEquals(x, y);
assertEquals(x.hashCode(), y.hashCode());
// verify written contents match
assertEquals(out1.getLength(), out2.getLength());
// assumes that writes will grow buffer deterministically
assertEquals("Bad test", out1.getData().length, out2.getData().length);
assertArrayEquals(out1.getData(), out2.getData());
}
}
static void binSortTest(GridmixRecord x, GridmixRecord y, int min,
int max, WritableComparator cmp) throws Exception {
final Random r = new Random();
final long s = r.nextLong();
r.setSeed(s);
LOG.info("sort: " + s);
final DataOutputBuffer out1 = new DataOutputBuffer();
final DataOutputBuffer out2 = new DataOutputBuffer();
for (int i = min; i < max; ++i) {
final long seed1 = r.nextLong();
setSerialize(x, seed1, i, out1);
assertEquals(0, x.compareSeed(seed1, Math.max(0, i - x.fixedBytes())));
final long seed2 = r.nextLong();
setSerialize(y, seed2, i, out2);
assertEquals(0, y.compareSeed(seed2, Math.max(0, i - x.fixedBytes())));
// for equal-sized records, ensure the byte comparison agrees where required
final int chk = WritableComparator.compareBytes(
out1.getData(), 0, out1.getLength(),
out2.getData(), 0, out2.getLength());
assertEquals(Integer.signum(chk), Integer.signum(x.compareTo(y)));
assertEquals(Integer.signum(chk), Integer.signum(cmp.compare(
out1.getData(), 0, out1.getLength(),
out2.getData(), 0, out2.getLength())));
// write second copy, compare eq
final int s1 = out1.getLength();
x.write(out1);
assertEquals(0, cmp.compare(out1.getData(), 0, s1,
out1.getData(), s1, out1.getLength() - s1));
final int s2 = out2.getLength();
y.write(out2);
assertEquals(0, cmp.compare(out2.getData(), 0, s2,
out2.getData(), s2, out2.getLength() - s2));
assertEquals(Integer.signum(chk), Integer.signum(cmp.compare(out1.getData(), 0, s1,
out2.getData(), s2, out2.getLength() - s2)));
}
}
static void checkSpec(GridmixKey a, GridmixKey b) throws Exception {
final Random r = new Random();
final long s = r.nextLong();
r.setSeed(s);
LOG.info("spec: " + s);
final DataInputBuffer in = new DataInputBuffer();
final DataOutputBuffer out = new DataOutputBuffer();
a.setType(GridmixKey.REDUCE_SPEC);
b.setType(GridmixKey.REDUCE_SPEC);
for (int i = 0; i < 100; ++i) {
final int in_rec = r.nextInt(Integer.MAX_VALUE);
a.setReduceInputRecords(in_rec);
final int out_rec = r.nextInt(Integer.MAX_VALUE);
a.setReduceOutputRecords(out_rec);
final int out_bytes = r.nextInt(Integer.MAX_VALUE);
a.setReduceOutputBytes(out_bytes);
final int min = WritableUtils.getVIntSize(in_rec)
+ WritableUtils.getVIntSize(out_rec)
+ WritableUtils.getVIntSize(out_bytes)
+ WritableUtils.getVIntSize(0);
assertEquals(min + 2, a.fixedBytes()); // meta + vint min
final int size = r.nextInt(1024) + a.fixedBytes() + 1;
setSerialize(a, r.nextLong(), size, out);
assertEquals(size, out.getLength());
assertTrue(a.equals(a));
assertEquals(0, a.compareTo(a));
in.reset(out.getData(), 0, out.getLength());
b.readFields(in);
assertEquals(size, b.getSize());
assertEquals(in_rec, b.getReduceInputRecords());
assertEquals(out_rec, b.getReduceOutputRecords());
assertEquals(out_bytes, b.getReduceOutputBytes());
assertTrue(a.equals(b));
assertEquals(0, a.compareTo(b));
assertEquals(a.hashCode(), b.hashCode());
}
}
static void setSerialize(GridmixRecord x, long seed, int size,
DataOutputBuffer out) throws IOException {
x.setSeed(seed);
x.setSize(size);
out.reset();
x.write(out);
}
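// Illustrative sketch (not part of the original test): the basic contract
// exercised by the helpers above is that a GridmixRecord serializes to
// exactly the size it was asked to occupy and deserializes back to that
// same size.
static void roundTripExample() throws IOException {
  final GridmixRecord written = new GridmixRecord();
  final DataOutputBuffer out = new DataOutputBuffer();
  setSerialize(written, 42L, 128, out);   // seed 42, target size 128 bytes
  assertEquals(128, out.getLength());     // write honors the requested size
  final DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), 0, out.getLength());
  final GridmixRecord read = new GridmixRecord();
  read.readFields(in);
  assertEquals(128, read.getSize());      // read recovers the same size
}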
@Test
public void testKeySpec() throws Exception {
final int min = 6;
final int max = 300;
final GridmixKey a = new GridmixKey(GridmixKey.REDUCE_SPEC, 1, 0L);
final GridmixKey b = new GridmixKey(GridmixKey.REDUCE_SPEC, 1, 0L);
lengthTest(a, b, min, max);
randomReplayTest(a, b, min, max);
binSortTest(a, b, min, max, new GridmixKey.Comparator());
// 2 fixed GR bytes, 1 type, 3 spec
eqSeedTest(a, b, max);
checkSpec(a, b);
}
@Test
public void testKeyData() throws Exception {
final int min = 2;
final int max = 300;
final GridmixKey a = new GridmixKey(GridmixKey.DATA, 1, 0L);
final GridmixKey b = new GridmixKey(GridmixKey.DATA, 1, 0L);
lengthTest(a, b, min, max);
randomReplayTest(a, b, min, max);
binSortTest(a, b, min, max, new GridmixKey.Comparator());
// 2 fixed GR bytes, 1 type
eqSeedTest(a, b, 300);
}
@Test
public void testBaseRecord() throws Exception {
final int min = 1;
final int max = 300;
final GridmixRecord a = new GridmixRecord();
final GridmixRecord b = new GridmixRecord();
lengthTest(a, b, min, max);
randomReplayTest(a, b, min, max);
binSortTest(a, b, min, max, new GridmixRecord.Comparator());
// 2 fixed GR bytes
eqSeedTest(a, b, 300);
}
public static void main(String[] argv) throws Exception {
boolean fail = false;
final TestGridmixRecord test = new TestGridmixRecord();
try { test.testKeySpec(); } catch (Exception e) {
fail = true;
e.printStackTrace();
}
try {test.testKeyData(); } catch (Exception e) {
fail = true;
e.printStackTrace();
}
try {test.testBaseRecord(); } catch (Exception e) {
fail = true;
e.printStackTrace();
}
System.exit(fail ? -1 : 0);
}
static void printDebug(GridmixRecord a, GridmixRecord b) throws IOException {
DataOutputBuffer out = new DataOutputBuffer();
a.write(out);
System.out.println("A " +
Arrays.toString(Arrays.copyOf(out.getData(), out.getLength())));
out.reset();
b.write(out);
System.out.println("B " +
Arrays.toString(Arrays.copyOf(out.getData(), out.getLength())));
}
}
| 10,160 | 35.419355 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestDistCacheEmulation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import static org.junit.Assert.*;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.MapContext;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Validate emulation of distributed cache load in gridmix simulated jobs.
*
*/
public class TestDistCacheEmulation {
private DistributedCacheEmulator dce = null;
@BeforeClass
public static void init() throws IOException {
GridmixTestUtils.initCluster(TestDistCacheEmulation.class);
File target=new File("target"+File.separator+TestDistCacheEmulation.class.getName());
if(!target.exists()){
assertTrue(target.mkdirs());
}
}
@AfterClass
public static void shutDown() throws IOException {
GridmixTestUtils.shutdownCluster();
}
/**
* Validate the dist cache files generated by GenerateDistCacheData job.
*
* @param jobConf
* configuration of GenerateDistCacheData job.
* @param sortedFileSizes
* array of sorted distributed cache file sizes
* @throws IOException
* @throws FileNotFoundException
*/
private void validateDistCacheData(Configuration jobConf,
long[] sortedFileSizes) throws FileNotFoundException, IOException {
Path distCachePath = dce.getDistributedCacheDir();
String filesListFile = jobConf
.get(GenerateDistCacheData.GRIDMIX_DISTCACHE_FILE_LIST);
FileSystem fs = FileSystem.get(jobConf);
// Validate the existence of Distributed Cache files list file directly
// under distributed cache directory
Path listFile = new Path(filesListFile);
assertTrue("Path of Distributed Cache files list file is wrong.",
distCachePath.equals(listFile.getParent().makeQualified(fs.getUri(), fs.getWorkingDirectory())));
// Delete the dist cache files list file
assertTrue(
"Failed to delete distributed Cache files list file " + listFile,
fs.delete(listFile,true));
List<Long> fileSizes = new ArrayList<Long>();
for (long size : sortedFileSizes) {
fileSizes.add(size);
}
// validate dist cache files after deleting the 'files list file'
validateDistCacheFiles(fileSizes, distCachePath);
}
/**
* Validate private/public distributed cache files.
*
* @param filesSizesExpected
* list of sizes of expected dist cache files
* @param distCacheDir
* the distributed cache dir to be validated
* @throws IOException
* @throws FileNotFoundException
*/
private void validateDistCacheFiles(List<Long> filesSizesExpected, Path distCacheDir)
throws FileNotFoundException, IOException {
// RemoteIterator<LocatedFileStatus> iter =
FileStatus[] statuses = GridmixTestUtils.dfs.listStatus(distCacheDir);
int numFiles = filesSizesExpected.size();
assertEquals("Number of files under distributed cache dir is wrong.",
numFiles, statuses.length);
for (int i = 0; i < numFiles; i++) {
FileStatus stat = statuses[i];
assertTrue("File size of distributed cache file "
+ stat.getPath().toUri().getPath() + " is wrong.",
filesSizesExpected.remove(stat.getLen()));
FsPermission perm = stat.getPermission();
assertEquals("Wrong permissions for distributed cache file "
+ stat.getPath().toUri().getPath(), new FsPermission(
GenerateDistCacheData.GRIDMIX_DISTCACHE_FILE_PERM), perm);
}
}
/**
* Configures 5 HDFS-based dist cache files and 1 local-FS-based dist cache
* file in the given Configuration object <code>conf</code>.
*
* @param conf
* configuration where dist cache config properties are to be set
* @return array of sorted HDFS-based distributed cache file sizes
* @throws IOException
*/
private long[] configureDummyDistCacheFiles(Configuration conf)
throws IOException {
String user = UserGroupInformation.getCurrentUser().getShortUserName();
conf.set("user.name", user);
// Set some dummy dist cache files in gridmix configuration so that they go
// into the configuration of JobStory objects.
String[] distCacheFiles = { "hdfs:///tmp/file1.txt",
"/tmp/" + user + "/.staging/job_1/file2.txt",
"hdfs:///user/user1/file3.txt", "/home/user2/file4.txt",
"subdir1/file5.txt", "subdir2/file6.gz" };
String[] fileSizes = { "400", "2500", "700", "1200", "1500", "500" };
String[] visibilities = { "true", "false", "false", "true", "true", "false" };
String[] timeStamps = { "1234", "2345", "34567", "5434", "125", "134" };
// DistributedCache.setCacheFiles(fileCaches, conf);
conf.setStrings(MRJobConfig.CACHE_FILES, distCacheFiles);
conf.setStrings(MRJobConfig.CACHE_FILES_SIZES, fileSizes);
conf.setStrings(JobContext.CACHE_FILE_VISIBILITIES, visibilities);
conf.setStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS, timeStamps);
// local FS based dist cache file whose path contains <user>/.staging is
// not created on HDFS. So file size 2500 is not added to sortedFileSizes.
long[] sortedFileSizes = new long[] { 1500, 1200, 700, 500, 400 };
return sortedFileSizes;
}
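// Illustrative sketch (not part of the original test): of the six dist cache
// entries configured above, only the five HDFS-based ones are generated; the
// local <user>/.staging file of size 2500 never reaches HDFS. Sorting the
// remaining sizes in descending order reproduces the sortedFileSizes array
// returned by configureDummyDistCacheFiles().
private static long[] exampleSortedHdfsSizes() {
  final long[] hdfsSizes = { 400, 700, 1200, 1500, 500 };   // HDFS entries
  java.util.Arrays.sort(hdfsSizes);                         // ascending
  final long[] descending = new long[hdfsSizes.length];
  for (int i = 0; i < hdfsSizes.length; i++) {
    descending[i] = hdfsSizes[hdfsSizes.length - 1 - i];    // reverse
  }
  return descending;   // {1500, 1200, 700, 500, 400}
}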
/**
* Runs setupGenerateDistCacheData() on a new DistributedCacheEmulator and
* returns the jobConf. Fills the array <code>sortedFileSizes</code> that can
* be used for validation. Also validates the exit code returned by
* setupGenerateDistCacheData().
*
* @param generate
* true if -generate option is specified
* @param sortedFileSizes
* sorted HDFS-based distributed cache file sizes
* @throws IOException
* @throws InterruptedException
*/
private Configuration runSetupGenerateDistCacheData(boolean generate,
long[] sortedFileSizes) throws IOException, InterruptedException {
Configuration conf = new Configuration();
long[] fileSizes = configureDummyDistCacheFiles(conf);
System.arraycopy(fileSizes, 0, sortedFileSizes, 0, fileSizes.length);
// Job stories of all 3 jobs will have same dist cache files in their
// configurations
final int numJobs = 3;
DebugJobProducer jobProducer = new DebugJobProducer(numJobs, conf);
Configuration jobConf = GridmixTestUtils.mrvl.getConfig();
Path ioPath = new Path("testSetupGenerateDistCacheData")
.makeQualified(GridmixTestUtils.dfs.getUri(),GridmixTestUtils.dfs.getWorkingDirectory());
FileSystem fs = FileSystem.get(jobConf);
if (fs.exists(ioPath)) {
fs.delete(ioPath, true);
}
FileSystem.mkdirs(fs, ioPath, new FsPermission((short) 0777));
dce = createDistributedCacheEmulator(jobConf, ioPath, generate);
int exitCode = dce.setupGenerateDistCacheData(jobProducer);
int expectedExitCode = generate ? 0
: Gridmix.MISSING_DIST_CACHE_FILES_ERROR;
assertEquals("setupGenerateDistCacheData failed.", expectedExitCode,
exitCode);
// reset back
resetDistCacheConfigProperties(jobConf);
return jobConf;
}
/**
* Reset the config properties related to Distributed Cache in the given job
* configuration <code>jobConf</code>.
*
* @param jobConf
* job configuration
*/
private void resetDistCacheConfigProperties(Configuration jobConf) {
// reset current/latest property names
jobConf.setStrings(MRJobConfig.CACHE_FILES, "");
jobConf.setStrings(MRJobConfig.CACHE_FILES_SIZES, "");
jobConf.setStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS, "");
jobConf.setStrings(JobContext.CACHE_FILE_VISIBILITIES, "");
// reset old property names
jobConf.setStrings("mapred.cache.files", "");
jobConf.setStrings("mapred.cache.files.filesizes", "");
jobConf.setStrings("mapred.cache.files.visibilities", "");
jobConf.setStrings("mapred.cache.files.timestamps", "");
}
/**
* Validate that the GenerateDistCacheData job creates the dist cache files properly.
*
* @throws Exception
*/
@Test (timeout=200000)
public void testGenerateDistCacheData() throws Exception {
long[] sortedFileSizes = new long[5];
Configuration jobConf = runSetupGenerateDistCacheData(true, sortedFileSizes);
GridmixJob gridmixJob = new GenerateDistCacheData(jobConf);
Job job = gridmixJob.call();
assertEquals("Number of reduce tasks in GenerateDistCacheData is not 0.",
0, job.getNumReduceTasks());
assertTrue("GenerateDistCacheData job failed.",
job.waitForCompletion(false));
validateDistCacheData(jobConf, sortedFileSizes);
}
/**
* Validate setupGenerateDistCacheData by validating
* <ul>
* <li>permissions of the distributed cache directories, and</li>
* <li>content of the generated sequence file, including dist cache file
* paths and their file sizes.</li>
* </ul>
*/
private void validateSetupGenDC(Configuration jobConf, long[] sortedFileSizes)
throws IOException, InterruptedException {
// build things needed for validation
long sumOfFileSizes = 0;
for (int i = 0; i < sortedFileSizes.length; i++) {
sumOfFileSizes += sortedFileSizes[i];
}
FileSystem fs = FileSystem.get(jobConf);
assertEquals("Number of distributed cache files to be generated is wrong.",
sortedFileSizes.length,
jobConf.getInt(GenerateDistCacheData.GRIDMIX_DISTCACHE_FILE_COUNT, -1));
assertEquals("Total size of dist cache files to be generated is wrong.",
sumOfFileSizes,
jobConf.getLong(GenerateDistCacheData.GRIDMIX_DISTCACHE_BYTE_COUNT, -1));
Path filesListFile = new Path(
jobConf.get(GenerateDistCacheData.GRIDMIX_DISTCACHE_FILE_LIST));
FileStatus stat = fs.getFileStatus(filesListFile);
assertEquals("Wrong permissions of dist Cache files list file "
+ filesListFile, new FsPermission((short) 0644), stat.getPermission());
InputSplit split = new FileSplit(filesListFile, 0, stat.getLen(),
(String[]) null);
TaskAttemptContext taskContext = MapReduceTestUtil
.createDummyMapTaskAttemptContext(jobConf);
RecordReader<LongWritable, BytesWritable> reader = new GenerateDistCacheData.GenDCDataFormat()
.createRecordReader(split, taskContext);
MapContext<LongWritable, BytesWritable, NullWritable, BytesWritable> mapContext = new MapContextImpl<LongWritable, BytesWritable, NullWritable, BytesWritable>(
jobConf, taskContext.getTaskAttemptID(), reader, null, null,
MapReduceTestUtil.createDummyReporter(), split);
reader.initialize(split, mapContext);
// start validating setupGenerateDistCacheData
doValidateSetupGenDC(reader, fs, sortedFileSizes);
}
/**
* Validate setupGenerateDistCacheData by validating
* <ul>
* <li>permissions of the distributed cache directory, and</li>
* <li>content of the generated sequence file, including dist cache file
* paths and their file sizes.</li>
* </ul>
*/
private void doValidateSetupGenDC(
RecordReader<LongWritable, BytesWritable> reader, FileSystem fs,
long[] sortedFileSizes) throws IOException, InterruptedException {
// Validate permissions of dist cache directory
Path distCacheDir = dce.getDistributedCacheDir();
assertEquals(
"Wrong permissions for distributed cache dir " + distCacheDir,
fs.getFileStatus(distCacheDir).getPermission().getOtherAction()
.and(FsAction.EXECUTE), FsAction.EXECUTE);
// Validate the content of the sequence file generated by
// dce.setupGenerateDistCacheData().
LongWritable key = new LongWritable();
BytesWritable val = new BytesWritable();
for (int i = 0; i < sortedFileSizes.length; i++) {
assertTrue("Number of files written to the sequence file by "
+ "setupGenerateDistCacheData is less than the expected.",
reader.nextKeyValue());
key = reader.getCurrentKey();
val = reader.getCurrentValue();
long fileSize = key.get();
String file = new String(val.getBytes(), 0, val.getLength());
// Dist Cache files should be sorted based on file size.
assertEquals("Dist cache file size is wrong.", sortedFileSizes[i],
fileSize);
// Validate dist cache file path.
// parent dir of dist cache file
Path parent = new Path(file).getParent().makeQualified(fs.getUri(),fs.getWorkingDirectory());
// should exist in dist cache dir
assertTrue("Public dist cache file path is wrong.",
distCacheDir.equals(parent));
}
}
/**
* Test if DistributedCacheEmulator's setup of GenerateDistCacheData is
* working as expected.
*
* @throws IOException
* @throws InterruptedException
*/
@Test (timeout=20000)
public void testSetupGenerateDistCacheData() throws IOException,
InterruptedException {
long[] sortedFileSizes = new long[5];
Configuration jobConf = runSetupGenerateDistCacheData(true, sortedFileSizes);
validateSetupGenDC(jobConf, sortedFileSizes);
// Verify if correct exit code is seen when -generate option is missing and
// distributed cache files are missing in the expected path.
runSetupGenerateDistCacheData(false, sortedFileSizes);
}
/**
* Create a DistributedCacheEmulator object and initialize it by calling
* init() on it with a dummy trace. Also configure the pseudo-local FS.
*/
private DistributedCacheEmulator createDistributedCacheEmulator(
Configuration conf, Path ioPath, boolean generate) throws IOException {
DistributedCacheEmulator dce = new DistributedCacheEmulator(conf, ioPath);
JobCreator jobCreator = JobCreator.getPolicy(conf, JobCreator.LOADJOB);
jobCreator.setDistCacheEmulator(dce);
dce.init("dummytrace", jobCreator, generate);
return dce;
}
/**
* Test the configuration property for disabling/enabling emulation of
* distributed cache load.
*/
@Test (timeout=2000)
public void testDistCacheEmulationConfigurability() throws IOException {
Configuration jobConf = GridmixTestUtils.mrvl.getConfig();
Path ioPath = new Path("testDistCacheEmulationConfigurability")
.makeQualified(GridmixTestUtils.dfs.getUri(),GridmixTestUtils.dfs.getWorkingDirectory());
FileSystem fs = FileSystem.get(jobConf);
FileSystem.mkdirs(fs, ioPath, new FsPermission((short) 0777));
// default config
dce = createDistributedCacheEmulator(jobConf, ioPath, false);
assertTrue("Default configuration of "
+ DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE
+ " is wrong.", dce.shouldEmulateDistCacheLoad());
// config property set to false
jobConf.setBoolean(
DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE, false);
dce = createDistributedCacheEmulator(jobConf, ioPath, false);
assertFalse("Disabling of emulation of distributed cache load by setting "
+ DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE
+ " to false is not working.", dce.shouldEmulateDistCacheLoad());
}
/**
* Test the <code>configureDistCacheFiles()</code> method.
*/
@Test (timeout=2000)
public void testDistCacheEmulator() throws Exception {
Configuration conf = new Configuration();
configureDummyDistCacheFiles(conf);
File ws = new File("target" + File.separator + this.getClass().getName());
Path ioPath = new Path(ws.getAbsolutePath());
DistributedCacheEmulator dce = new DistributedCacheEmulator(conf, ioPath);
JobConf jobConf = new JobConf(conf);
jobConf.setUser(UserGroupInformation.getCurrentUser().getShortUserName());
File fin=new File("src"+File.separator+"test"+File.separator+"resources"+File.separator+"data"+File.separator+"wordcount.json");
dce.init(fin.getAbsolutePath(), JobCreator.LOADJOB, true);
dce.configureDistCacheFiles(conf, jobConf);
String[] caches=conf.getStrings(MRJobConfig.CACHE_FILES);
String[] tmpfiles=conf.getStrings("tmpfiles");
// this method should fill caches AND tmpfiles from MRJobConfig.CACHE_FILES property
assertEquals(6, ((caches==null?0:caches.length)+(tmpfiles==null?0:tmpfiles.length)));
}
}
| 17,904 | 40.542923 | 163 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestRandomTextDataGenerator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.mapred.gridmix.RandomTextDataGenerator;
import static org.junit.Assert.*;
import org.junit.Test;
/**
* Test {@link RandomTextDataGenerator}.
*/
public class TestRandomTextDataGenerator {
/**
* Test if {@link RandomTextDataGenerator} can generate random words of
* desired size.
*/
@Test
public void testRandomTextDataGenerator() {
RandomTextDataGenerator rtdg = new RandomTextDataGenerator(10, 0L, 5);
List<String> words = rtdg.getRandomWords();
// check the size
assertEquals("List size mismatch", 10, words.size());
// check the words
Set<String> wordsSet = new HashSet<String>(words);
assertEquals("List size mismatch due to duplicates", 10, wordsSet.size());
// check the word lengths
for (String word : wordsSet) {
assertEquals("Word size mismatch", 5, word.length());
}
}
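// Illustrative sketch (an assumption about typical usage, not necessarily how
// Gridmix itself consumes the list): a pool of fixed-size random words lets a
// writer pick a word deterministically for any record position, which keeps
// the generated data reproducible for a given seed.
private static String wordForPosition(List<String> words, long position) {
  final int index = (int) ((position & Long.MAX_VALUE) % words.size());
  return words.get(index);
}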
/**
* Test if {@link RandomTextDataGenerator} can generate same words given the
* same list-size, word-length and seed.
*/
@Test
public void testRandomTextDataGeneratorRepeatability() {
RandomTextDataGenerator rtdg1 = new RandomTextDataGenerator(10, 0L, 5);
List<String> words1 = rtdg1.getRandomWords();
RandomTextDataGenerator rtdg2 = new RandomTextDataGenerator(10, 0L, 5);
List<String> words2 = rtdg2.getRandomWords();
assertTrue("List mismatch", words1.equals(words2));
}
/**
* Test if {@link RandomTextDataGenerator} can generate different words given
* different seeds.
*/
@Test
public void testRandomTextDataGeneratorUniqueness() {
RandomTextDataGenerator rtdg1 = new RandomTextDataGenerator(10, 1L, 5);
Set<String> words1 = new HashSet<String>(rtdg1.getRandomWords());
RandomTextDataGenerator rtdg2 = new RandomTextDataGenerator(10, 0L, 5);
Set<String> words2 = new HashSet<String>(rtdg2.getRandomWords());
assertFalse("List size mismatch across lists", words1.equals(words2));
}
}
| 2,863 | 32.694118 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestHighRamJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import static org.junit.Assert.*;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.gridmix.DebugJobProducer.MockJob;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.rumen.JobStory;
import org.junit.Test;
/**
* Test that Gridmix correctly configures high-ram job properties in the
* simulated job's configuration.
*/
public class TestHighRamJob {
/**
* A dummy {@link GridmixJob} that opens up the simulated job for testing.
*/
protected static class DummyGridmixJob extends GridmixJob {
public DummyGridmixJob(Configuration conf, JobStory desc)
throws IOException {
super(conf, System.currentTimeMillis(), desc, new Path("test"),
UserGroupInformation.getCurrentUser(), -1);
}
/**
* Do nothing since this is a dummy gridmix job.
*/
@Override
public Job call() throws Exception {
return null;
}
@Override
protected boolean canEmulateCompression() {
// return false as we don't need compression
return false;
}
protected Job getJob() {
// open the simulated job for testing
return job;
}
}
private static void testHighRamConfig(long jobMapMB, long jobReduceMB,
long clusterMapMB, long clusterReduceMB, long simulatedClusterMapMB,
long simulatedClusterReduceMB, long expectedMapMB, long expectedReduceMB,
Configuration gConf)
throws IOException {
Configuration simulatedJobConf = new Configuration(gConf);
simulatedJobConf.setLong(MRConfig.MAPMEMORY_MB, simulatedClusterMapMB);
simulatedJobConf.setLong(MRConfig.REDUCEMEMORY_MB,
simulatedClusterReduceMB);
// define a source conf
Configuration sourceConf = new Configuration();
// configure the original job
sourceConf.setLong(MRJobConfig.MAP_MEMORY_MB, jobMapMB);
sourceConf.setLong(MRConfig.MAPMEMORY_MB, clusterMapMB);
sourceConf.setLong(MRJobConfig.REDUCE_MEMORY_MB, jobReduceMB);
sourceConf.setLong(MRConfig.REDUCEMEMORY_MB, clusterReduceMB);
// define a mock job
MockJob story = new MockJob(sourceConf);
GridmixJob job = new DummyGridmixJob(simulatedJobConf, story);
Job simulatedJob = job.getJob();
Configuration simulatedConf = simulatedJob.getConfiguration();
// check that the high-ram properties are set to the expected values
assertEquals(expectedMapMB,
simulatedConf.getLong(MRJobConfig.MAP_MEMORY_MB,
MRJobConfig.DEFAULT_MAP_MEMORY_MB));
assertEquals(expectedReduceMB,
simulatedConf.getLong(MRJobConfig.REDUCE_MEMORY_MB,
MRJobConfig.DEFAULT_REDUCE_MEMORY_MB));
}
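// Illustrative arithmetic (a sketch of the expectation encoded in the test
// cases below, not Gridmix's actual implementation): with high-ram emulation
// enabled, the simulated task memory is the original task memory scaled by
// the ratio of the simulated cluster's slot size to the source cluster's
// slot size. For the (10, 45, 5, 15, 50, 100, ...) case below this gives
// maps 10 * (50 / 5) = 100 MB and reduces 45 * (100 / 15) = 300 MB.
private static long scaledHighRamMemory(long jobMB, long sourceClusterMB,
    long simulatedClusterMB) {
  return Math.round(((double) simulatedClusterMB / sourceClusterMB) * jobMB);
}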
/**
* Tests high ram job properties configuration.
*/
@SuppressWarnings("deprecation")
@Test
public void testHighRamFeatureEmulation() throws IOException {
// define the gridmix conf
Configuration gridmixConf = new Configuration();
// test : check high ram emulation disabled
gridmixConf.setBoolean(GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE, false);
testHighRamConfig(10, 20, 5, 10, MRJobConfig.DEFAULT_MAP_MEMORY_MB,
MRJobConfig.DEFAULT_REDUCE_MEMORY_MB,
MRJobConfig.DEFAULT_MAP_MEMORY_MB,
MRJobConfig.DEFAULT_REDUCE_MEMORY_MB, gridmixConf);
// test : check with high ram enabled (default) and no scaling
gridmixConf = new Configuration();
// set the deprecated max memory limit
gridmixConf.setLong(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY,
20*1024*1024);
testHighRamConfig(10, 20, 5, 10, 5, 10, 10, 20, gridmixConf);
// test : check with high ram enabled and scaling
gridmixConf = new Configuration();
// set the new max map/reduce memory limits
gridmixConf.setLong(JTConfig.JT_MAX_MAPMEMORY_MB, 100);
gridmixConf.setLong(JTConfig.JT_MAX_REDUCEMEMORY_MB, 300);
testHighRamConfig(10, 45, 5, 15, 50, 100, 100, 300, gridmixConf);
// test : check with high ram enabled and map memory scaling mismatch
// (deprecated)
gridmixConf = new Configuration();
gridmixConf.setLong(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY,
70*1024*1024);
Boolean failed = null;
try {
testHighRamConfig(10, 45, 5, 15, 50, 100, 100, 300, gridmixConf);
failed = false;
} catch (Exception e) {
failed = true;
}
assertNotNull(failed);
assertTrue("Exception expected for exceeding map memory limit "
+ "(deprecation)!", failed);
// test : check with high ram enabled and reduce memory scaling mismatch
// (deprecated)
gridmixConf = new Configuration();
gridmixConf.setLong(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY,
150*1024*1024);
failed = null;
try {
testHighRamConfig(10, 45, 5, 15, 50, 100, 100, 300, gridmixConf);
failed = false;
} catch (Exception e) {
failed = true;
}
assertNotNull(failed);
assertTrue("Exception expected for exceeding reduce memory limit "
+ "(deprecation)!", failed);
// test : check with high ram enabled and scaling mismatch on map limits
gridmixConf = new Configuration();
gridmixConf.setLong(JTConfig.JT_MAX_MAPMEMORY_MB, 70);
failed = null;
try {
testHighRamConfig(10, 45, 5, 15, 50, 100, 100, 300, gridmixConf);
failed = false;
} catch (Exception e) {
failed = true;
}
assertNotNull(failed);
assertTrue("Exception expected for exceeding map memory limit!", failed);
// test : check with high ram enabled and scaling mismatch on reduce
// limits
gridmixConf = new Configuration();
gridmixConf.setLong(JTConfig.JT_MAX_REDUCEMEMORY_MB, 200);
failed = null;
try {
testHighRamConfig(10, 45, 5, 15, 50, 100, 100, 300, gridmixConf);
failed = false;
} catch (Exception e) {
failed = true;
}
assertNotNull(failed);
assertTrue("Exception expected for exceeding reduce memory limit!", failed);
}
}
| 7,361 | 36.753846 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestLoadJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
/*
Test LoadJob: Gridmix sends data to the jobs and then verifies their execution.
*/
public class TestLoadJob extends CommonJobTest {
public static final Log LOG = LogFactory.getLog(Gridmix.class);
static {
((Log4JLogger) LogFactory.getLog("org.apache.hadoop.mapred.gridmix"))
.getLogger().setLevel(Level.DEBUG);
((Log4JLogger) LogFactory.getLog(StressJobFactory.class)).getLogger()
.setLevel(Level.DEBUG);
}
@BeforeClass
public static void init() throws IOException {
GridmixTestUtils.initCluster(TestLoadJob.class);
}
@AfterClass
public static void shutDown() throws IOException {
GridmixTestUtils.shutdownCluster();
}
/*
* test serial policy with LoadJob. Task should execute without exceptions
*/
@Test (timeout=500000)
public void testSerialSubmit() throws Exception {
policy = GridmixJobSubmissionPolicy.SERIAL;
LOG.info("Serial started at " + System.currentTimeMillis());
doSubmission(JobCreator.LOADJOB.name(), false);
LOG.info("Serial ended at " + System.currentTimeMillis());
}
/*
* test replay policy with LoadJob
*/
@Test (timeout=500000)
public void testReplaySubmit() throws Exception {
policy = GridmixJobSubmissionPolicy.REPLAY;
LOG.info(" Replay started at " + System.currentTimeMillis());
doSubmission(JobCreator.LOADJOB.name(), false);
LOG.info(" Replay ended at " + System.currentTimeMillis());
}
}
| 2,561 | 30.243902 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.rumen.JobStory;
import org.apache.hadoop.tools.rumen.JobStoryProducer;
import org.apache.hadoop.util.ExitUtil;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.InputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.zip.GZIPInputStream;
import static org.junit.Assert.*;
public class TestGridmixSubmission extends CommonJobTest {
private static File inSpace = new File("src" + File.separator + "test"
+ File.separator + "resources" + File.separator + "data");
static {
((Log4JLogger) LogFactory.getLog("org.apache.hadoop.mapred.gridmix"))
.getLogger().setLevel(Level.DEBUG);
}
@BeforeClass
public static void init() throws IOException {
GridmixTestUtils.initCluster(TestGridmixSubmission.class);
System.setProperty("src.test.data", inSpace.getAbsolutePath());
}
@AfterClass
public static void shutDown() throws IOException {
GridmixTestUtils.shutdownCluster();
}
/**
* Verifies that the given {@code JobStory} corresponds to the checked-in
* WordCount {@code JobStory}. The verification is effected via JUnit
* assertions.
*
* @param js the candidate JobStory.
*/
private void verifyWordCountJobStory(JobStory js) {
assertNotNull("Null JobStory", js);
String expectedJobStory = "WordCount:johndoe:default:1285322645148:3:1";
String actualJobStory = js.getName() + ":" + js.getUser() + ":"
+ js.getQueueName() + ":" + js.getSubmissionTime() + ":"
+ js.getNumberMaps() + ":" + js.getNumberReduces();
assertEquals("Unexpected JobStory", expectedJobStory, actualJobStory);
}
/**
* Expands a file compressed using {@code gzip}.
*
* @param fs the {@code FileSystem} corresponding to the given file.
* @param in the path to the compressed file.
* @param out the path to the uncompressed output.
* @throws Exception if there was an error during the operation.
*/
private void expandGzippedTrace(FileSystem fs, Path in, Path out)
throws Exception {
byte[] buff = new byte[4096];
GZIPInputStream gis = new GZIPInputStream(fs.open(in));
FSDataOutputStream fsdOs = fs.create(out);
int numRead;
while ((numRead = gis.read(buff, 0, buff.length)) != -1) {
fsdOs.write(buff, 0, numRead);
}
gis.close();
fsdOs.close();
}
/**
* Tests the reading of traces in GridMix3. These traces are generated by
* Rumen and are in the JSON format. The traces can optionally be compressed
* and uncompressed traces can also be passed to GridMix3 via its standard
* input stream. The testing is effected via JUnit assertions.
*
* @throws Exception if there was an error.
*/
@Test (timeout=20000)
public void testTraceReader() throws Exception {
Configuration conf = new Configuration();
FileSystem lfs = FileSystem.getLocal(conf);
Path rootInputDir = new Path(System.getProperty("src.test.data"));
rootInputDir = rootInputDir.makeQualified(lfs.getUri(),
lfs.getWorkingDirectory());
Path rootTempDir = new Path(System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir")), "testTraceReader");
rootTempDir = rootTempDir.makeQualified(lfs.getUri(),
lfs.getWorkingDirectory());
Path inputFile = new Path(rootInputDir, "wordcount.json.gz");
Path tempFile = new Path(rootTempDir, "gridmix3-wc.json");
InputStream origStdIn = System.in;
InputStream tmpIs = null;
try {
DebugGridmix dgm = new DebugGridmix();
JobStoryProducer jsp = dgm.createJobStoryProducer(inputFile.toString(),
conf);
LOG.info("Verifying JobStory from compressed trace...");
verifyWordCountJobStory(jsp.getNextJob());
expandGzippedTrace(lfs, inputFile, tempFile);
jsp = dgm.createJobStoryProducer(tempFile.toString(), conf);
LOG.info("Verifying JobStory from uncompressed trace...");
verifyWordCountJobStory(jsp.getNextJob());
tmpIs = lfs.open(tempFile);
System.setIn(tmpIs);
LOG.info("Verifying JobStory from trace in standard input...");
jsp = dgm.createJobStoryProducer("-", conf);
verifyWordCountJobStory(jsp.getNextJob());
} finally {
System.setIn(origStdIn);
if (tmpIs != null) {
tmpIs.close();
}
lfs.delete(rootTempDir, true);
}
}
@Test (timeout=500000)
public void testReplaySubmit() throws Exception {
policy = GridmixJobSubmissionPolicy.REPLAY;
LOG.info(" Replay started at " + System.currentTimeMillis());
doSubmission(null, false);
LOG.info(" Replay ended at " + System.currentTimeMillis());
}
@Test (timeout=500000)
public void testStressSubmit() throws Exception {
policy = GridmixJobSubmissionPolicy.STRESS;
LOG.info(" Stress started at " + System.currentTimeMillis());
doSubmission(null, false);
LOG.info(" Stress ended at " + System.currentTimeMillis());
}
  // test that an empty request results in a usage hint message
@Test (timeout=100000)
public void testMain() throws Exception {
SecurityManager securityManager = System.getSecurityManager();
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final PrintStream out = new PrintStream(bytes);
    final PrintStream oldErr = System.err;
System.setErr(out);
ExitUtil.disableSystemExit();
try {
String[] argv = new String[0];
DebugGridmix.main(argv);
} catch (ExitUtil.ExitException e) {
assertEquals("ExitException", e.getMessage());
ExitUtil.resetFirstExitException();
} finally {
      System.setErr(oldErr);
System.setSecurityManager(securityManager);
}
String print = bytes.toString();
    // the usage tip should be printed to the standard error stream
assertTrue(print
.contains("Usage: gridmix [-generate <MiB>] [-users URI] [-Dname=value ...] <iopath> <trace>"));
assertTrue(print.contains("e.g. gridmix -generate 100m foo -"));
}
}
| 7,252 | 34.729064 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/GridmixTestUtils.java
|
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.MiniMRClientCluster;
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.conf.Configuration;
import java.io.IOException;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
public class GridmixTestUtils {
private static final Log LOG = LogFactory.getLog(GridmixTestUtils.class);
static final Path DEST = new Path("/gridmix");
static FileSystem dfs = null;
static MiniDFSCluster dfsCluster = null;
static MiniMRClientCluster mrvl = null;
protected static final String GRIDMIX_USE_QUEUE_IN_TRACE =
"gridmix.job-submission.use-queue-in-trace";
protected static final String GRIDMIX_DEFAULT_QUEUE =
"gridmix.job-submission.default-queue";
public static void initCluster(Class<?> caller) throws IOException {
Configuration conf = new Configuration();
// conf.set("mapred.queue.names", "default,q1,q2");
conf.set("mapred.queue.names", "default");
conf.set("yarn.scheduler.capacity.root.queues", "default");
conf.set("yarn.scheduler.capacity.root.default.capacity", "100.0");
conf.setBoolean(GRIDMIX_USE_QUEUE_IN_TRACE, false);
conf.set(GRIDMIX_DEFAULT_QUEUE, "default");
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true)
.build();// MiniDFSCluster(conf, 3, true, null);
dfs = dfsCluster.getFileSystem();
conf.set(JTConfig.JT_RETIREJOBS, "false");
mrvl = MiniMRClientClusterFactory.create(caller, 2, conf);
conf = mrvl.getConfig();
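    // Stamp any distributed-cache files registered in the mini cluster's
    // config with the current time, presumably so that later job submissions
    // see consistent cache metadata.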
String[] files = conf.getStrings(MRJobConfig.CACHE_FILES);
if (files != null) {
String[] timestamps = new String[files.length];
for (int i = 0; i < files.length; i++) {
timestamps[i] = Long.toString(System.currentTimeMillis());
}
conf.setStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS, timestamps);
}
}
public static void shutdownCluster() throws IOException {
if (mrvl != null) {
mrvl.stop();
}
if (dfsCluster != null) {
dfsCluster.shutdown();
}
}
/**
   * Creates the home and staging directories for the given dummy user.
   *
   * @param user the dummy user.
   * @param conf the cluster configuration.
*/
public static void createHomeAndStagingDirectory(String user,
Configuration conf) {
try {
FileSystem fs = dfsCluster.getFileSystem();
String path = "/user/" + user;
Path homeDirectory = new Path(path);
if (!fs.exists(homeDirectory)) {
LOG.info("Creating Home directory : " + homeDirectory);
fs.mkdirs(homeDirectory);
changePermission(user, homeDirectory, fs);
}
changePermission(user, homeDirectory, fs);
Path stagingArea = new Path(
conf.get("mapreduce.jobtracker.staging.root.dir",
"/tmp/hadoop/mapred/staging"));
LOG.info("Creating Staging root directory : " + stagingArea);
fs.mkdirs(stagingArea);
fs.setPermission(stagingArea, new FsPermission((short) 0777));
} catch (IOException ioe) {
ioe.printStackTrace();
}
}
static void changePermission(String user, Path homeDirectory, FileSystem fs)
throws IOException {
fs.setOwner(homeDirectory, user, "");
}
}
| 4,330 | 36.017094 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixMemoryEmulation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.gridmix.DebugJobProducer.MockJob;
import org.apache.hadoop.mapred.gridmix.TestHighRamJob.DummyGridmixJob;
import org.apache.hadoop.mapred.gridmix.TestResourceUsageEmulators.FakeProgressive;
import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.TotalHeapUsageEmulatorPlugin;
import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.TotalHeapUsageEmulatorPlugin.DefaultHeapUsageEmulator;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.tools.rumen.ResourceUsageMetrics;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
/**
* Test Gridmix memory emulation.
*/
public class TestGridmixMemoryEmulation {
/**
* This is a dummy class that fakes heap usage.
*/
private static class FakeHeapUsageEmulatorCore
extends DefaultHeapUsageEmulator {
private int numCalls = 0;
@Override
public void load(long sizeInMB) {
++numCalls;
super.load(sizeInMB);
}
// Get the total number of times load() was invoked
int getNumCalls() {
return numCalls;
}
// Get the total number of 1mb objects stored within
long getHeapUsageInMB() {
return heapSpace.size();
}
@Override
public void reset() {
// no op to stop emulate() from resetting
}
/**
     * For re-testing purposes.
*/
void resetFake() {
numCalls = 0;
super.reset();
}
}
/**
* This is a dummy class that fakes the heap usage emulator plugin.
*/
private static class FakeHeapUsageEmulatorPlugin
extends TotalHeapUsageEmulatorPlugin {
private FakeHeapUsageEmulatorCore core;
public FakeHeapUsageEmulatorPlugin(FakeHeapUsageEmulatorCore core) {
super(core);
this.core = core;
}
@Override
protected long getMaxHeapUsageInMB() {
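      // Report an effectively unbounded maximum heap so that the plugin never
      // limits emulation based on the (fake) physical memory.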
return Long.MAX_VALUE / ONE_MB;
}
@Override
protected long getTotalHeapUsageInMB() {
return core.getHeapUsageInMB();
}
}
/**
* Test {@link TotalHeapUsageEmulatorPlugin}'s core heap usage emulation
* engine.
*/
@Test
public void testHeapUsageEmulator() throws IOException {
FakeHeapUsageEmulatorCore heapEmulator = new FakeHeapUsageEmulatorCore();
long testSizeInMB = 10; // 10 mb
long previousHeap = heapEmulator.getHeapUsageInMB();
heapEmulator.load(testSizeInMB);
long currentHeap = heapEmulator.getHeapUsageInMB();
// check if the heap has increased by expected value
assertEquals("Default heap emulator failed to load 10mb",
previousHeap + testSizeInMB, currentHeap);
// test reset
heapEmulator.resetFake();
assertEquals("Default heap emulator failed to reset",
0, heapEmulator.getHeapUsageInMB());
}
/**
* Test {@link TotalHeapUsageEmulatorPlugin}.
*/
@Test
public void testTotalHeapUsageEmulatorPlugin() throws Exception {
Configuration conf = new Configuration();
// set the dummy resource calculator for testing
ResourceCalculatorPlugin monitor = new DummyResourceCalculatorPlugin();
long maxHeapUsage = 1024 * TotalHeapUsageEmulatorPlugin.ONE_MB; // 1GB
conf.setLong(DummyResourceCalculatorPlugin.MAXPMEM_TESTING_PROPERTY,
maxHeapUsage);
monitor.setConf(conf);
// no buffer to be reserved
conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO, 0F);
// only 1 call to be made per cycle
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO, 1F);
long targetHeapUsageInMB = 200; // 200mb
// fake progress indicator
FakeProgressive fakeProgress = new FakeProgressive();
// fake heap usage generator
FakeHeapUsageEmulatorCore fakeCore = new FakeHeapUsageEmulatorCore();
// a heap usage emulator with fake core
FakeHeapUsageEmulatorPlugin heapPlugin =
new FakeHeapUsageEmulatorPlugin(fakeCore);
// test with invalid or missing resource usage value
ResourceUsageMetrics invalidUsage =
TestResourceUsageEmulators.createMetrics(0);
heapPlugin.initialize(conf, invalidUsage, null, null);
// test if disabled heap emulation plugin's emulate() call is a no-operation
// this will test if the emulation plugin is disabled or not
int numCallsPre = fakeCore.getNumCalls();
long heapUsagePre = fakeCore.getHeapUsageInMB();
heapPlugin.emulate();
int numCallsPost = fakeCore.getNumCalls();
long heapUsagePost = fakeCore.getHeapUsageInMB();
    // verify that no calls were made to the heap usage emulator core
assertEquals("Disabled heap usage emulation plugin works!",
numCallsPre, numCallsPost);
    // verify that the heap usage reported by the emulator core is unchanged
assertEquals("Disabled heap usage emulation plugin works!",
heapUsagePre, heapUsagePost);
// test with get progress
float progress = heapPlugin.getProgress();
assertEquals("Invalid progress of disabled cumulative heap usage emulation "
+ "plugin!", 1.0f, progress, 0f);
// test with wrong/invalid configuration
Boolean failed = null;
invalidUsage =
TestResourceUsageEmulators.createMetrics(maxHeapUsage
+ TotalHeapUsageEmulatorPlugin.ONE_MB);
try {
heapPlugin.initialize(conf, invalidUsage, monitor, null);
failed = false;
} catch (Exception e) {
failed = true;
}
assertNotNull("Fail case failure!", failed);
assertTrue("Expected failure!", failed);
// test with valid resource usage value
ResourceUsageMetrics metrics =
TestResourceUsageEmulators.createMetrics(targetHeapUsageInMB
* TotalHeapUsageEmulatorPlugin.ONE_MB);
// test with default emulation interval
// in every interval, the emulator will add 100% of the expected usage
// (since gridmix.emulators.resource-usage.heap.load-ratio=1)
// so at 10%, emulator will add 10% (difference), at 20% it will add 10% ...
// So to emulate 200MB, it will add
// 20mb + 20mb + 20mb + 20mb + .. = 200mb
testEmulationAccuracy(conf, fakeCore, monitor, metrics, heapPlugin, 200,
10);
// test with custom value for emulation interval of 20%
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_EMULATION_PROGRESS_INTERVAL,
0.2F);
// 40mb + 40mb + 40mb + 40mb + 40mb = 200mb
testEmulationAccuracy(conf, fakeCore, monitor, metrics, heapPlugin, 200, 5);
// test with custom value of free heap ratio and load ratio = 1
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO, 1F);
conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO, 0.5F);
// 40mb + 0mb + 80mb + 0mb + 0mb = 120mb
testEmulationAccuracy(conf, fakeCore, monitor, metrics, heapPlugin, 120, 2);
// test with custom value of heap load ratio and min free heap ratio = 0
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO, 0.5F);
conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO, 0F);
// 20mb (call#1) + 20mb (call#1) + 20mb (call#2) + 20mb (call#2) +.. = 200mb
testEmulationAccuracy(conf, fakeCore, monitor, metrics, heapPlugin, 200,
10);
    // test with custom value of min free heap ratio = 0.25 and load ratio = 0.5
conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO, 0.25F);
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO, 0.5F);
// 20mb (call#1) + 20mb (call#1) + 30mb (call#2) + 0mb (call#2)
// + 30mb (call#3) + 0mb (call#3) + 35mb (call#4) + 0mb (call#4)
// + 37mb (call#5) + 0mb (call#5) = 162mb
testEmulationAccuracy(conf, fakeCore, monitor, metrics, heapPlugin, 162, 6);
// test if emulation interval boundary is respected
fakeProgress = new FakeProgressive(); // initialize
conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO, 0F);
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO, 1F);
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_EMULATION_PROGRESS_INTERVAL,
0.25F);
heapPlugin.initialize(conf, metrics, monitor, fakeProgress);
fakeCore.resetFake();
// take a snapshot after the initialization
long initHeapUsage = fakeCore.getHeapUsageInMB();
long initNumCallsUsage = fakeCore.getNumCalls();
// test with 0 progress
testEmulationBoundary(0F, fakeCore, fakeProgress, heapPlugin, initHeapUsage,
initNumCallsUsage, "[no-op, 0 progress]");
// test with 24% progress
testEmulationBoundary(0.24F, fakeCore, fakeProgress, heapPlugin,
initHeapUsage, initNumCallsUsage,
"[no-op, 24% progress]");
// test with 25% progress
testEmulationBoundary(0.25F, fakeCore, fakeProgress, heapPlugin,
targetHeapUsageInMB / 4, 1, "[op, 25% progress]");
// test with 80% progress
testEmulationBoundary(0.80F, fakeCore, fakeProgress, heapPlugin,
(targetHeapUsageInMB * 4) / 5, 2, "[op, 80% progress]");
// now test if the final call with 100% progress ramps up the heap usage
testEmulationBoundary(1F, fakeCore, fakeProgress, heapPlugin,
targetHeapUsageInMB, 3, "[op, 100% progress]");
}
  // test whether the heap usage emulator achieves the desired target using
  // the desired number of calls to the underlying core engine.
private static void testEmulationAccuracy(Configuration conf,
FakeHeapUsageEmulatorCore fakeCore,
ResourceCalculatorPlugin monitor,
ResourceUsageMetrics metrics,
TotalHeapUsageEmulatorPlugin heapPlugin,
long expectedTotalHeapUsageInMB,
long expectedTotalNumCalls)
throws Exception {
FakeProgressive fakeProgress = new FakeProgressive();
fakeCore.resetFake();
heapPlugin.initialize(conf, metrics, monitor, fakeProgress);
int numLoops = 0;
while (fakeProgress.getProgress() < 1) {
++numLoops;
float progress = numLoops / 100.0F;
fakeProgress.setProgress(progress);
heapPlugin.emulate();
}
// test if the resource plugin shows the expected usage
assertEquals("Cumulative heap usage emulator plugin failed (total usage)!",
expectedTotalHeapUsageInMB, fakeCore.getHeapUsageInMB(), 1L);
// test if the resource plugin shows the expected num calls
assertEquals("Cumulative heap usage emulator plugin failed (num calls)!",
expectedTotalNumCalls, fakeCore.getNumCalls(), 0L);
}
// tests if the heap usage emulation plugin emulates only at the expected
// progress gaps
private static void testEmulationBoundary(float progress,
FakeHeapUsageEmulatorCore fakeCore, FakeProgressive fakeProgress,
TotalHeapUsageEmulatorPlugin heapPlugin, long expectedTotalHeapUsageInMB,
long expectedTotalNumCalls, String info) throws Exception {
fakeProgress.setProgress(progress);
heapPlugin.emulate();
// test heap usage
assertEquals("Emulation interval test for heap usage failed " + info + "!",
expectedTotalHeapUsageInMB, fakeCore.getHeapUsageInMB(), 0L);
// test num calls
assertEquals("Emulation interval test for heap usage failed " + info + "!",
expectedTotalNumCalls, fakeCore.getNumCalls(), 0L);
}
/**
* Test the specified task java heap options.
*/
@SuppressWarnings("deprecation")
private void testJavaHeapOptions(String mapOptions,
String reduceOptions, String taskOptions, String defaultMapOptions,
String defaultReduceOptions, String defaultTaskOptions,
String expectedMapOptions, String expectedReduceOptions,
String expectedTaskOptions) throws Exception {
Configuration simulatedConf = new Configuration();
// reset the configuration parameters
simulatedConf.unset(MRJobConfig.MAP_JAVA_OPTS);
simulatedConf.unset(MRJobConfig.REDUCE_JAVA_OPTS);
simulatedConf.unset(JobConf.MAPRED_TASK_JAVA_OPTS);
// set the default map task options
if (defaultMapOptions != null) {
simulatedConf.set(MRJobConfig.MAP_JAVA_OPTS, defaultMapOptions);
}
// set the default reduce task options
if (defaultReduceOptions != null) {
simulatedConf.set(MRJobConfig.REDUCE_JAVA_OPTS, defaultReduceOptions);
}
// set the default task options
if (defaultTaskOptions != null) {
simulatedConf.set(JobConf.MAPRED_TASK_JAVA_OPTS, defaultTaskOptions);
}
Configuration originalConf = new Configuration();
// reset the configuration parameters
originalConf.unset(MRJobConfig.MAP_JAVA_OPTS);
originalConf.unset(MRJobConfig.REDUCE_JAVA_OPTS);
originalConf.unset(JobConf.MAPRED_TASK_JAVA_OPTS);
// set the map task options
if (mapOptions != null) {
originalConf.set(MRJobConfig.MAP_JAVA_OPTS, mapOptions);
}
// set the reduce task options
if (reduceOptions != null) {
originalConf.set(MRJobConfig.REDUCE_JAVA_OPTS, reduceOptions);
}
// set the task options
if (taskOptions != null) {
originalConf.set(JobConf.MAPRED_TASK_JAVA_OPTS, taskOptions);
}
// configure the task jvm's heap options
GridmixJob.configureTaskJVMOptions(originalConf, simulatedConf);
assertEquals("Map heap options mismatch!", expectedMapOptions,
simulatedConf.get(MRJobConfig.MAP_JAVA_OPTS));
assertEquals("Reduce heap options mismatch!", expectedReduceOptions,
simulatedConf.get(MRJobConfig.REDUCE_JAVA_OPTS));
assertEquals("Task heap options mismatch!", expectedTaskOptions,
simulatedConf.get(JobConf.MAPRED_TASK_JAVA_OPTS));
}
/**
* Test task-level java heap options configuration in {@link GridmixJob}.
*/
@Test
public void testJavaHeapOptions() throws Exception {
// test missing opts
testJavaHeapOptions(null, null, null, null, null, null, null, null,
null);
// test original heap opts and missing default opts
testJavaHeapOptions("-Xms10m", "-Xms20m", "-Xms30m", null, null, null,
null, null, null);
// test missing opts with default opts
testJavaHeapOptions(null, null, null, "-Xms10m", "-Xms20m", "-Xms30m",
"-Xms10m", "-Xms20m", "-Xms30m");
// test empty option
testJavaHeapOptions("", "", "", null, null, null, null, null, null);
// test empty default option and no original heap options
testJavaHeapOptions(null, null, null, "", "", "", "", "", "");
// test empty opts and default opts
testJavaHeapOptions("", "", "", "-Xmx10m -Xms1m", "-Xmx50m -Xms2m",
"-Xms2m -Xmx100m", "-Xmx10m -Xms1m", "-Xmx50m -Xms2m",
"-Xms2m -Xmx100m");
// test custom heap opts with no default opts
testJavaHeapOptions("-Xmx10m", "-Xmx20m", "-Xmx30m", null, null, null,
"-Xmx10m", "-Xmx20m", "-Xmx30m");
// test heap opts with default opts (multiple value)
testJavaHeapOptions("-Xms5m -Xmx200m", "-Xms15m -Xmx300m",
"-Xms25m -Xmx50m", "-XXabc", "-XXxyz", "-XXdef",
"-XXabc -Xmx200m", "-XXxyz -Xmx300m", "-XXdef -Xmx50m");
// test heap opts with default opts (duplication of -Xmx)
testJavaHeapOptions("-Xms5m -Xmx200m", "-Xms15m -Xmx300m",
"-Xms25m -Xmx50m", "-XXabc -Xmx500m", "-XXxyz -Xmx600m",
"-XXdef -Xmx700m", "-XXabc -Xmx200m", "-XXxyz -Xmx300m",
"-XXdef -Xmx50m");
// test heap opts with default opts (single value)
testJavaHeapOptions("-Xmx10m", "-Xmx20m", "-Xmx50m", "-Xms2m",
"-Xms3m", "-Xms5m", "-Xms2m -Xmx10m", "-Xms3m -Xmx20m",
"-Xms5m -Xmx50m");
// test heap opts with default opts (duplication of -Xmx)
testJavaHeapOptions("-Xmx10m", "-Xmx20m", "-Xmx50m", "-Xmx2m",
"-Xmx3m", "-Xmx5m", "-Xmx10m", "-Xmx20m", "-Xmx50m");
}
/**
* Test disabled task heap options configuration in {@link GridmixJob}.
*/
@Test
@SuppressWarnings("deprecation")
public void testJavaHeapOptionsDisabled() throws Exception {
Configuration gridmixConf = new Configuration();
gridmixConf.setBoolean(GridmixJob.GRIDMIX_TASK_JVM_OPTIONS_ENABLE, false);
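    // With JVM option configuration disabled, the simulated job is expected to
    // keep its own default -Xmx values instead of inheriting those of the
    // original job (asserted below).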
// set the default values of simulated job
gridmixConf.set(MRJobConfig.MAP_JAVA_OPTS, "-Xmx1m");
gridmixConf.set(MRJobConfig.REDUCE_JAVA_OPTS, "-Xmx2m");
gridmixConf.set(JobConf.MAPRED_TASK_JAVA_OPTS, "-Xmx3m");
// set the default map and reduce task options for original job
final JobConf originalConf = new JobConf();
originalConf.set(MRJobConfig.MAP_JAVA_OPTS, "-Xmx10m");
originalConf.set(MRJobConfig.REDUCE_JAVA_OPTS, "-Xmx20m");
originalConf.set(JobConf.MAPRED_TASK_JAVA_OPTS, "-Xmx30m");
// define a mock job
MockJob story = new MockJob(originalConf) {
public JobConf getJobConf() {
return originalConf;
}
};
GridmixJob job = new DummyGridmixJob(gridmixConf, story);
Job simulatedJob = job.getJob();
Configuration simulatedConf = simulatedJob.getConfiguration();
assertEquals("Map heap options works when disabled!", "-Xmx1m",
simulatedConf.get(MRJobConfig.MAP_JAVA_OPTS));
assertEquals("Reduce heap options works when disabled!", "-Xmx2m",
simulatedConf.get(MRJobConfig.REDUCE_JAVA_OPTS));
assertEquals("Task heap options works when disabled!", "-Xmx3m",
simulatedConf.get(JobConf.MAPRED_TASK_JAVA_OPTS));
}
}
| 18,946 | 40.368996 | 118 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Utils;
import org.apache.hadoop.mapred.gridmix.CompressionEmulationUtil.RandomTextDataMapper;
import org.apache.hadoop.mapred.gridmix.GenerateData.GenSplit;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import static org.junit.Assert.*;
import org.junit.Test;
/**
* Test {@link CompressionEmulationUtil}
*/
public class TestCompressionEmulationUtils {
//TODO Remove this once LocalJobRunner can run Gridmix.
static class CustomInputFormat extends GenerateData.GenDataFormat {
@Override
public List<InputSplit> getSplits(JobContext jobCtxt) throws IOException {
// get the total data to be generated
long toGen =
jobCtxt.getConfiguration().getLong(GenerateData.GRIDMIX_GEN_BYTES, -1);
if (toGen < 0) {
throw new IOException("Invalid/missing generation bytes: " + toGen);
}
// get the total number of mappers configured
int totalMappersConfigured =
jobCtxt.getConfiguration().getInt(MRJobConfig.NUM_MAPS, -1);
if (totalMappersConfigured < 0) {
throw new IOException("Invalid/missing num mappers: "
+ totalMappersConfigured);
}
final long bytesPerTracker = toGen / totalMappersConfigured;
final ArrayList<InputSplit> splits =
new ArrayList<InputSplit>(totalMappersConfigured);
for (int i = 0; i < totalMappersConfigured; ++i) {
splits.add(new GenSplit(bytesPerTracker,
new String[] { "tracker_local" }));
}
return splits;
}
}
/**
* Test {@link RandomTextDataMapper} via {@link CompressionEmulationUtil}.
*/
@Test
public void testRandomCompressedTextDataGenerator() throws Exception {
int wordSize = 10;
int listSize = 20;
long dataSize = 10*1024*1024;
Configuration conf = new Configuration();
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
// configure the RandomTextDataGenerator to generate desired sized data
conf.setInt(RandomTextDataGenerator.GRIDMIX_DATAGEN_RANDOMTEXT_LISTSIZE,
listSize);
conf.setInt(RandomTextDataGenerator.GRIDMIX_DATAGEN_RANDOMTEXT_WORDSIZE,
wordSize);
conf.setLong(GenerateData.GRIDMIX_GEN_BYTES, dataSize);
conf.set("mapreduce.job.hdfs-servers", "");
FileSystem lfs = FileSystem.getLocal(conf);
// define the test's root temp directory
Path rootTempDir =
new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(
lfs.getUri(), lfs.getWorkingDirectory());
Path tempDir = new Path(rootTempDir, "TestRandomCompressedTextDataGenr");
lfs.delete(tempDir, true);
runDataGenJob(conf, tempDir);
// validate the output data
FileStatus[] files =
lfs.listStatus(tempDir, new Utils.OutputFileUtils.OutputFilesFilter());
long size = 0;
long maxLineSize = 0;
for (FileStatus status : files) {
InputStream in =
CompressionEmulationUtil
.getPossiblyDecompressedInputStream(status.getPath(), conf, 0);
BufferedReader reader = new BufferedReader(new InputStreamReader(in));
String line = reader.readLine();
if (line != null) {
long lineSize = line.getBytes().length;
if (lineSize > maxLineSize) {
maxLineSize = lineSize;
}
while (line != null) {
for (String word : line.split("\\s")) {
size += word.getBytes().length;
}
line = reader.readLine();
}
}
reader.close();
}
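    // The generator emits whole lines, so the accumulated size may overshoot
    // the requested data size by at most one line (hence the bounds below).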
assertTrue(size >= dataSize);
assertTrue(size <= dataSize + maxLineSize);
}
/**
* Runs a GridMix data-generation job.
*/
private static void runDataGenJob(Configuration conf, Path tempDir)
throws IOException, ClassNotFoundException, InterruptedException {
JobClient client = new JobClient(conf);
// get the local job runner
conf.setInt(MRJobConfig.NUM_MAPS, 1);
Job job = Job.getInstance(conf);
CompressionEmulationUtil.configure(job);
job.setInputFormatClass(CustomInputFormat.class);
// set the output path
FileOutputFormat.setOutputPath(job, tempDir);
// submit and wait for completion
job.submit();
int ret = job.waitForCompletion(true) ? 0 : 1;
assertEquals("Job Failed", 0, ret);
}
/**
* Test if {@link RandomTextDataGenerator} can generate random text data
* with the desired compression ratio. This involves
* - using {@link CompressionEmulationUtil} to configure the MR job for
* generating the random text data with the desired compression ratio
* - running the MR job
* - test {@link RandomTextDataGenerator}'s output and match the output size
* (compressed) with the expected compression ratio.
*/
private void testCompressionRatioConfigure(float ratio)
throws Exception {
long dataSize = 10*1024*1024;
Configuration conf = new Configuration();
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
conf.setLong(GenerateData.GRIDMIX_GEN_BYTES, dataSize);
conf.set("mapreduce.job.hdfs-servers", "");
float expectedRatio = CompressionEmulationUtil.DEFAULT_COMPRESSION_RATIO;
if (ratio > 0) {
// set the compression ratio in the conf
CompressionEmulationUtil.setMapInputCompressionEmulationRatio(conf, ratio);
expectedRatio =
CompressionEmulationUtil.standardizeCompressionRatio(ratio);
}
// invoke the utility to map from ratio to word-size
CompressionEmulationUtil.setupDataGeneratorConfig(conf);
FileSystem lfs = FileSystem.getLocal(conf);
// define the test's root temp directory
Path rootTempDir =
new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(
lfs.getUri(), lfs.getWorkingDirectory());
Path tempDir =
new Path(rootTempDir, "TestCustomRandomCompressedTextDataGenr");
lfs.delete(tempDir, true);
runDataGenJob(conf, tempDir);
// validate the output data
FileStatus[] files =
lfs.listStatus(tempDir, new Utils.OutputFileUtils.OutputFilesFilter());
long size = 0;
for (FileStatus status : files) {
size += status.getLen();
}
float compressionRatio = ((float)size)/dataSize;
float stdRatio =
CompressionEmulationUtil.standardizeCompressionRatio(compressionRatio);
assertEquals(expectedRatio, stdRatio, 0.0D);
}
/**
* Test compression ratio with multiple compression ratios.
*/
@Test
public void testCompressionRatios() throws Exception {
// test default compression ratio i.e 0.5
testCompressionRatioConfigure(0F);
// test for a sample compression ratio of 0.2
testCompressionRatioConfigure(0.2F);
// test for a sample compression ratio of 0.4
testCompressionRatioConfigure(0.4F);
// test for a sample compression ratio of 0.65
testCompressionRatioConfigure(0.65F);
// test for a compression ratio of 0.682 which should be standardized
// to round(0.682) i.e 0.68
testCompressionRatioConfigure(0.682F);
// test for a compression ratio of 0.567 which should be standardized
// to round(0.567) i.e 0.57
testCompressionRatioConfigure(0.567F);
    // test with a compression ratio of 0.01, which is less than the min
    // supported value of 0.07
boolean failed = false;
try {
testCompressionRatioConfigure(0.01F);
} catch (RuntimeException re) {
failed = true;
}
assertTrue("Compression ratio min value (0.07) check failed!", failed);
    // test with a compression ratio of 0.7, which is greater than the max
    // supported value of 0.68
failed = false;
try {
testCompressionRatioConfigure(0.7F);
} catch (RuntimeException re) {
failed = true;
}
assertTrue("Compression ratio max value (0.68) check failed!", failed);
}
/**
* Test compression ratio standardization.
*/
@Test
public void testCompressionRatioStandardization() throws Exception {
assertEquals(0.55F,
CompressionEmulationUtil.standardizeCompressionRatio(0.55F), 0.0D);
assertEquals(0.65F,
CompressionEmulationUtil.standardizeCompressionRatio(0.652F), 0.0D);
assertEquals(0.78F,
CompressionEmulationUtil.standardizeCompressionRatio(0.777F), 0.0D);
assertEquals(0.86F,
CompressionEmulationUtil.standardizeCompressionRatio(0.855F), 0.0D);
}
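  /**
   * A minimal sketch (not part of the original test) of the rounding rule the
   * standardization assertions above imply: the ratio is rounded to two
   * decimal places. The helper below is purely illustrative and assumes that
   * behaviour rather than documenting the actual implementation.
   */
  private static float roundToTwoDecimals(float ratio) {
    // e.g. 0.652F -> 0.65F, 0.777F -> 0.78F, 0.855F -> 0.86F
    return Math.round(ratio * 100.0F) / 100.0F;
  }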
/**
* Test map input compression ratio configuration utilities.
*/
@Test
public void testInputCompressionRatioConfiguration() throws Exception {
Configuration conf = new Configuration();
float ratio = 0.567F;
CompressionEmulationUtil.setMapInputCompressionEmulationRatio(conf, ratio);
assertEquals(ratio,
CompressionEmulationUtil.getMapInputCompressionEmulationRatio(conf),
0.0D);
}
/**
* Test map output compression ratio configuration utilities.
*/
@Test
public void testIntermediateCompressionRatioConfiguration()
throws Exception {
Configuration conf = new Configuration();
float ratio = 0.567F;
CompressionEmulationUtil.setMapOutputCompressionEmulationRatio(conf, ratio);
assertEquals(ratio,
CompressionEmulationUtil.getMapOutputCompressionEmulationRatio(conf),
0.0D);
}
/**
* Test reduce output compression ratio configuration utilities.
*/
@Test
public void testOutputCompressionRatioConfiguration() throws Exception {
Configuration conf = new Configuration();
float ratio = 0.567F;
CompressionEmulationUtil.setJobOutputCompressionEmulationRatio(conf, ratio);
assertEquals(ratio,
CompressionEmulationUtil.getJobOutputCompressionEmulationRatio(conf),
0.0D);
}
/**
* Test compressible {@link GridmixRecord}.
*/
@Test
public void testCompressibleGridmixRecord() throws IOException {
JobConf conf = new JobConf();
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
FileSystem lfs = FileSystem.getLocal(conf);
int dataSize = 1024 * 1024 * 10; // 10 MB
float ratio = 0.357F;
// define the test's root temp directory
Path rootTempDir =
new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(
lfs.getUri(), lfs.getWorkingDirectory());
Path tempDir = new Path(rootTempDir,
"TestPossiblyCompressibleGridmixRecord");
lfs.delete(tempDir, true);
// define a compressible GridmixRecord
GridmixRecord record = new GridmixRecord(dataSize, 0);
record.setCompressibility(true, ratio); // enable compression
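    // With compressibility enabled, the record's payload should shrink to
    // roughly ratio * dataSize bytes once gzipped (verified further below).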
conf.setClass(FileOutputFormat.COMPRESS_CODEC, GzipCodec.class,
CompressionCodec.class);
org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(conf, true);
// write the record to a file
Path recordFile = new Path(tempDir, "record");
OutputStream outStream = CompressionEmulationUtil
.getPossiblyCompressedOutputStream(recordFile,
conf);
DataOutputStream out = new DataOutputStream(outStream);
record.write(out);
out.close();
outStream.close();
// open the compressed stream for reading
Path actualRecordFile = recordFile.suffix(".gz");
InputStream in =
CompressionEmulationUtil
.getPossiblyDecompressedInputStream(actualRecordFile, conf, 0);
// get the compressed file size
long compressedFileSize = lfs.listStatus(actualRecordFile)[0].getLen();
GridmixRecord recordRead = new GridmixRecord();
recordRead.readFields(new DataInputStream(in));
assertEquals("Record size mismatch in a compressible GridmixRecord",
dataSize, recordRead.getSize());
assertTrue("Failed to generate a compressible GridmixRecord",
recordRead.getSize() > compressedFileSize);
// check if the record can generate data with the desired compression ratio
float seenRatio = ((float)compressedFileSize)/dataSize;
assertEquals(CompressionEmulationUtil.standardizeCompressionRatio(ratio),
CompressionEmulationUtil.standardizeCompressionRatio(seenRatio), 1.0D);
}
/**
* Test
* {@link CompressionEmulationUtil#isCompressionEmulationEnabled(
* org.apache.hadoop.conf.Configuration)}.
*/
@Test
public void testIsCompressionEmulationEnabled() {
Configuration conf = new Configuration();
// Check default values
assertTrue(CompressionEmulationUtil.isCompressionEmulationEnabled(conf));
// Check disabled
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, false);
assertFalse(CompressionEmulationUtil.isCompressionEmulationEnabled(conf));
// Check enabled
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
assertTrue(CompressionEmulationUtil.isCompressionEmulationEnabled(conf));
}
/**
* Test
* {@link CompressionEmulationUtil#getPossiblyDecompressedInputStream(Path,
* Configuration, long)}
* and
* {@link CompressionEmulationUtil#getPossiblyCompressedOutputStream(Path,
* Configuration)}.
*/
@Test
public void testPossiblyCompressedDecompressedStreams() throws IOException {
JobConf conf = new JobConf();
FileSystem lfs = FileSystem.getLocal(conf);
String inputLine = "Hi Hello!";
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
conf.setBoolean(FileOutputFormat.COMPRESS, true);
conf.setClass(FileOutputFormat.COMPRESS_CODEC, GzipCodec.class,
CompressionCodec.class);
// define the test's root temp directory
Path rootTempDir =
new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(
lfs.getUri(), lfs.getWorkingDirectory());
Path tempDir =
new Path(rootTempDir, "TestPossiblyCompressedDecompressedStreams");
lfs.delete(tempDir, true);
// create a compressed file
Path compressedFile = new Path(tempDir, "test");
OutputStream out =
CompressionEmulationUtil.getPossiblyCompressedOutputStream(compressedFile,
conf);
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out));
writer.write(inputLine);
writer.close();
// now read back the data from the compressed stream
compressedFile = compressedFile.suffix(".gz");
InputStream in =
CompressionEmulationUtil
.getPossiblyDecompressedInputStream(compressedFile, conf, 0);
BufferedReader reader = new BufferedReader(new InputStreamReader(in));
String readLine = reader.readLine();
assertEquals("Compression/Decompression error", inputLine, readLine);
reader.close();
}
/**
* Test if
* {@link CompressionEmulationUtil#configureCompressionEmulation(
* org.apache.hadoop.mapred.JobConf, org.apache.hadoop.mapred.JobConf)}
* can extract compression related configuration parameters.
*/
@Test
public void testExtractCompressionConfigs() {
JobConf source = new JobConf();
JobConf target = new JobConf();
// set the default values
source.setBoolean(FileOutputFormat.COMPRESS, false);
source.set(FileOutputFormat.COMPRESS_CODEC, "MyDefaultCodec");
source.set(FileOutputFormat.COMPRESS_TYPE, "MyDefaultType");
source.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, false);
source.set(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC, "MyDefaultCodec2");
CompressionEmulationUtil.configureCompressionEmulation(source, target);
// check default values
assertFalse(target.getBoolean(FileOutputFormat.COMPRESS, true));
assertEquals("MyDefaultCodec", target.get(FileOutputFormat.COMPRESS_CODEC));
assertEquals("MyDefaultType", target.get(FileOutputFormat.COMPRESS_TYPE));
assertFalse(target.getBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true));
assertEquals("MyDefaultCodec2",
target.get(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC));
assertFalse(CompressionEmulationUtil
.isInputCompressionEmulationEnabled(target));
// set new values
source.setBoolean(FileOutputFormat.COMPRESS, true);
source.set(FileOutputFormat.COMPRESS_CODEC, "MyCodec");
source.set(FileOutputFormat.COMPRESS_TYPE, "MyType");
source.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
source.set(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC, "MyCodec2");
org.apache.hadoop.mapred.FileInputFormat.setInputPaths(source, "file.gz");
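    // Registering a .gz input path should cause input-compression emulation to
    // be flagged as enabled in the target configuration (checked below).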
target = new JobConf(); // reset
CompressionEmulationUtil.configureCompressionEmulation(source, target);
// check new values
assertTrue(target.getBoolean(FileOutputFormat.COMPRESS, false));
assertEquals("MyCodec", target.get(FileOutputFormat.COMPRESS_CODEC));
assertEquals("MyType", target.get(FileOutputFormat.COMPRESS_TYPE));
assertTrue(target.getBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, false));
assertEquals("MyCodec2",
target.get(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC));
assertTrue(CompressionEmulationUtil
.isInputCompressionEmulationEnabled(target));
}
/**
   * Test that {@link FileQueue} can identify a compressed file and provide
   * readers that extract uncompressed data only if input-compression emulation
   * is enabled.
*/
@Test
public void testFileQueueDecompression() throws IOException {
JobConf conf = new JobConf();
FileSystem lfs = FileSystem.getLocal(conf);
String inputLine = "Hi Hello!";
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(conf, true);
org.apache.hadoop.mapred.FileOutputFormat.setOutputCompressorClass(conf,
GzipCodec.class);
// define the test's root temp directory
Path rootTempDir =
new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(
lfs.getUri(), lfs.getWorkingDirectory());
Path tempDir = new Path(rootTempDir, "TestFileQueueDecompression");
lfs.delete(tempDir, true);
// create a compressed file
Path compressedFile = new Path(tempDir, "test");
OutputStream out =
CompressionEmulationUtil.getPossiblyCompressedOutputStream(compressedFile,
conf);
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out));
writer.write(inputLine);
writer.close();
compressedFile = compressedFile.suffix(".gz");
// now read back the data from the compressed stream using FileQueue
long fileSize = lfs.listStatus(compressedFile)[0].getLen();
CombineFileSplit split =
new CombineFileSplit(new Path[] {compressedFile}, new long[] {fileSize});
FileQueue queue = new FileQueue(split, conf);
byte[] bytes = new byte[inputLine.getBytes().length];
queue.read(bytes);
queue.close();
String readLine = new String(bytes);
assertEquals("Compression/Decompression error", inputLine, readLine);
}
/**
* Tests the computation logic of uncompressed input bytes by
* {@link LoadJob#getUncompressedInputBytes(long, Configuration)}
*/
@Test
public void testComputeUncompressedInputBytes() {
long possiblyCompressedInputBytes = 100000;
float compressionRatio = 0.45F;
Configuration conf = new Configuration();
CompressionEmulationUtil.setMapInputCompressionEmulationRatio(conf,
compressionRatio);
    // By default, input compression emulation is disabled. Verify the
// computation of uncompressed input bytes.
long result = CompressionEmulationUtil.getUncompressedInputBytes(
possiblyCompressedInputBytes, conf);
assertEquals(possiblyCompressedInputBytes, result);
// Enable input compression emulation and verify uncompressed
// input bytes computation logic
CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
result = CompressionEmulationUtil.getUncompressedInputBytes(
possiblyCompressedInputBytes, conf);
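    // With input compression emulation enabled, the compressed byte count is
    // scaled up by the inverse of the ratio: here (long) (100000 / 0.45F),
    // roughly 222222 bytes.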
assertEquals((long)(possiblyCompressedInputBytes/compressionRatio), result);
}
}
| 22,639 | 37.568995 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/CommonJobTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.TaskReport;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.tools.rumen.TaskInfo;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.rumen.JobStory;
import org.apache.hadoop.util.ToolRunner;
public class CommonJobTest {
public static final Log LOG = LogFactory.getLog(Gridmix.class);
protected static int NJOBS = 2;
protected static final long GENDATA = 1; // in megabytes
protected static GridmixJobSubmissionPolicy policy = GridmixJobSubmissionPolicy.REPLAY;
private static File workspace = new File("target" + File.separator
+ TestGridmixSubmission.class.getName() + "-test");
static class DebugGridmix extends Gridmix {
private JobFactory<?> factory;
private TestMonitor monitor;
@Override
protected JobMonitor createJobMonitor(Statistics stats, Configuration conf)
throws IOException {
monitor = new TestMonitor(3, stats);
return monitor;
}
@Override
protected JobFactory<?> createJobFactory(JobSubmitter submitter,
String traceIn, Path scratchDir, Configuration conf,
CountDownLatch startFlag, UserResolver userResolver) throws IOException {
factory = DebugJobFactory.getFactory(submitter, scratchDir, NJOBS, conf,
startFlag, userResolver);
return factory;
}
public void checkMonitor() throws Exception {
monitor.verify(((DebugJobFactory.Debuggable) factory).getSubmitted());
}
}
static class TestMonitor extends JobMonitor {
private final BlockingQueue<Job> retiredJobs;
private final int expected;
static final long SLOPBYTES = 1024;
public TestMonitor(int expected, Statistics stats) {
super(3, TimeUnit.SECONDS, stats, 1);
this.expected = expected;
retiredJobs = new LinkedBlockingQueue<Job>();
}
@Override
protected void onSuccess(Job job) {
LOG.info(" Job Success " + job);
retiredJobs.add(job);
}
@Override
protected void onFailure(Job job) {
fail("Job failure: " + job);
}
public void verify(ArrayList<JobStory> submitted) throws Exception {
assertEquals("Bad job count", expected, retiredJobs.size());
final ArrayList<Job> succeeded = new ArrayList<Job>();
assertEquals("Bad job count", expected, retiredJobs.drainTo(succeeded));
final HashMap<String, JobStory> sub = new HashMap<String, JobStory>();
for (JobStory spec : submitted) {
sub.put(spec.getJobID().toString(), spec);
}
for (Job job : succeeded) {
final String jobName = job.getJobName();
Configuration configuration = job.getConfiguration();
if (GenerateData.JOB_NAME.equals(jobName)) {
RemoteIterator<LocatedFileStatus> rit = GridmixTestUtils.dfs
.listFiles(new Path("/"), true);
while (rit.hasNext()) {
System.out.println(rit.next().toString());
}
final Path in = new Path("foo").makeQualified(
GridmixTestUtils.dfs.getUri(),
GridmixTestUtils.dfs.getWorkingDirectory());
          // The generated data is compressed, so the total size on HDFS is
          // roughly the compressed data size plus logs: 1000000/2 + logs.
final ContentSummary generated = GridmixTestUtils.dfs
.getContentSummary(in);
assertEquals(550000, generated.getLength(), 10000);
Counter counter = job.getCounters()
.getGroup("org.apache.hadoop.mapreduce.FileSystemCounter")
.findCounter("HDFS_BYTES_WRITTEN");
assertEquals(generated.getLength(), counter.getValue());
continue;
} else if (GenerateDistCacheData.JOB_NAME.equals(jobName)) {
continue;
}
final String originalJobId = configuration.get(Gridmix.ORIGINAL_JOB_ID);
final JobStory spec = sub.get(originalJobId);
assertNotNull("No spec for " + jobName, spec);
assertNotNull("No counters for " + jobName, job.getCounters());
final String originalJobName = spec.getName();
System.out.println("originalJobName=" + originalJobName
+ ";GridmixJobName=" + jobName + ";originalJobID=" + originalJobId);
assertTrue("Original job name is wrong.",
originalJobName.equals(configuration.get(Gridmix.ORIGINAL_JOB_NAME)));
// Gridmix job seqNum contains 6 digits
int seqNumLength = 6;
String jobSeqNum = new DecimalFormat("000000").format(configuration.getInt(
GridmixJob.GRIDMIX_JOB_SEQ, -1));
// Original job name is of the format MOCKJOB<6 digit sequence number>
// because MockJob jobNames are of this format.
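        // For example, a GRIDMIX_JOB_SEQ of 3 is formatted as "000003", so the
        // original job name should end with "000003" and the simulated job
        // should be named <JOB_NAME_PREFIX>000003.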
assertTrue(originalJobName.substring(
originalJobName.length() - seqNumLength).equals(jobSeqNum));
assertTrue("Gridmix job name is not in the expected format.",
jobName.equals(GridmixJob.JOB_NAME_PREFIX + jobSeqNum));
final FileStatus stat = GridmixTestUtils.dfs.getFileStatus(new Path(
GridmixTestUtils.DEST, "" + Integer.valueOf(jobSeqNum)));
assertEquals("Wrong owner for " + jobName, spec.getUser(),
stat.getOwner());
final int nMaps = spec.getNumberMaps();
final int nReds = spec.getNumberReduces();
final JobClient client = new JobClient(
GridmixTestUtils.mrvl.getConfig());
final TaskReport[] mReports = client.getMapTaskReports(JobID
.downgrade(job.getJobID()));
assertEquals("Mismatched map count", nMaps, mReports.length);
check(TaskType.MAP, spec, mReports, 0, 0, SLOPBYTES, nReds);
final TaskReport[] rReports = client.getReduceTaskReports(JobID
.downgrade(job.getJobID()));
assertEquals("Mismatched reduce count", nReds, rReports.length);
check(TaskType.REDUCE, spec, rReports, nMaps * SLOPBYTES, 2 * nMaps, 0,
0);
}
}
    // Verify the simulated tasks' input/output bytes and record counts
    // against the original job spec, within the allowed slop.
private void check(final TaskType type, JobStory spec,
final TaskReport[] runTasks, long extraInputBytes,
int extraInputRecords, long extraOutputBytes, int extraOutputRecords)
throws Exception {
long[] runInputRecords = new long[runTasks.length];
long[] runInputBytes = new long[runTasks.length];
long[] runOutputRecords = new long[runTasks.length];
long[] runOutputBytes = new long[runTasks.length];
long[] specInputRecords = new long[runTasks.length];
long[] specInputBytes = new long[runTasks.length];
long[] specOutputRecords = new long[runTasks.length];
long[] specOutputBytes = new long[runTasks.length];
for (int i = 0; i < runTasks.length; ++i) {
final TaskInfo specInfo;
final Counters counters = runTasks[i].getCounters();
switch (type) {
case MAP:
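          // HDFS_BYTES_READ apparently includes the bytes of the serialized
          // split, so SPLIT_RAW_BYTES is subtracted to get the map input bytes.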
runInputBytes[i] = counters.findCounter("FileSystemCounters",
"HDFS_BYTES_READ").getValue()
- counters.findCounter(TaskCounter.SPLIT_RAW_BYTES).getValue();
runInputRecords[i] = (int) counters.findCounter(
TaskCounter.MAP_INPUT_RECORDS).getValue();
runOutputBytes[i] = counters
.findCounter(TaskCounter.MAP_OUTPUT_BYTES).getValue();
runOutputRecords[i] = (int) counters.findCounter(
TaskCounter.MAP_OUTPUT_RECORDS).getValue();
specInfo = spec.getTaskInfo(TaskType.MAP, i);
specInputRecords[i] = specInfo.getInputRecords();
specInputBytes[i] = specInfo.getInputBytes();
specOutputRecords[i] = specInfo.getOutputRecords();
specOutputBytes[i] = specInfo.getOutputBytes();
LOG.info(String.format(type + " SPEC: %9d -> %9d :: %5d -> %5d\n",
specInputBytes[i], specOutputBytes[i], specInputRecords[i],
specOutputRecords[i]));
LOG.info(String.format(type + " RUN: %9d -> %9d :: %5d -> %5d\n",
runInputBytes[i], runOutputBytes[i], runInputRecords[i],
runOutputRecords[i]));
break;
case REDUCE:
runInputBytes[i] = 0;
runInputRecords[i] = (int) counters.findCounter(
TaskCounter.REDUCE_INPUT_RECORDS).getValue();
runOutputBytes[i] = counters.findCounter("FileSystemCounters",
"HDFS_BYTES_WRITTEN").getValue();
runOutputRecords[i] = (int) counters.findCounter(
TaskCounter.REDUCE_OUTPUT_RECORDS).getValue();
specInfo = spec.getTaskInfo(TaskType.REDUCE, i);
// There is no reliable counter for reduce input bytes. The
// variable-length encoding of intermediate records and other noise
// make this quantity difficult to estimate. The shuffle and spec
// input bytes are included in debug output for reference, but are
// not checked
specInputBytes[i] = 0;
specInputRecords[i] = specInfo.getInputRecords();
specOutputRecords[i] = specInfo.getOutputRecords();
specOutputBytes[i] = specInfo.getOutputBytes();
LOG.info(String.format(type + " SPEC: (%9d) -> %9d :: %5d -> %5d\n",
specInfo.getInputBytes(), specOutputBytes[i],
specInputRecords[i], specOutputRecords[i]));
LOG.info(String
.format(type + " RUN: (%9d) -> %9d :: %5d -> %5d\n", counters
.findCounter(TaskCounter.REDUCE_SHUFFLE_BYTES).getValue(),
runOutputBytes[i], runInputRecords[i], runOutputRecords[i]));
break;
default:
fail("Unexpected type: " + type);
}
}
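    // Spec and run values are compared as sorted arrays because the task
    // ordering reported for the simulated job need not match the ordering in
    // the trace; only the per-task distributions are expected to agree within
    // the given slop.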
// Check input bytes
Arrays.sort(specInputBytes);
Arrays.sort(runInputBytes);
for (int i = 0; i < runTasks.length; ++i) {
assertTrue("Mismatched " + type + " input bytes " + specInputBytes[i]
+ "/" + runInputBytes[i],
eqPlusMinus(runInputBytes[i], specInputBytes[i], extraInputBytes));
}
// Check input records
Arrays.sort(specInputRecords);
Arrays.sort(runInputRecords);
for (int i = 0; i < runTasks.length; ++i) {
assertTrue(
"Mismatched " + type + " input records " + specInputRecords[i]
+ "/" + runInputRecords[i],
eqPlusMinus(runInputRecords[i], specInputRecords[i],
extraInputRecords));
}
// Check output bytes
Arrays.sort(specOutputBytes);
Arrays.sort(runOutputBytes);
for (int i = 0; i < runTasks.length; ++i) {
assertTrue(
"Mismatched " + type + " output bytes " + specOutputBytes[i] + "/"
+ runOutputBytes[i],
eqPlusMinus(runOutputBytes[i], specOutputBytes[i], extraOutputBytes));
}
// Check output records
Arrays.sort(specOutputRecords);
Arrays.sort(runOutputRecords);
for (int i = 0; i < runTasks.length; ++i) {
assertTrue(
"Mismatched " + type + " output records " + specOutputRecords[i]
+ "/" + runOutputRecords[i],
eqPlusMinus(runOutputRecords[i], specOutputRecords[i],
extraOutputRecords));
}
}
private static boolean eqPlusMinus(long a, long b, long x) {
final long diff = Math.abs(a - b);
return diff <= x;
}
}
protected void doSubmission(String jobCreatorName, boolean defaultOutputPath)
throws Exception {
final Path in = new Path("foo").makeQualified(
GridmixTestUtils.dfs.getUri(),
GridmixTestUtils.dfs.getWorkingDirectory());
final Path out = GridmixTestUtils.DEST.makeQualified(
GridmixTestUtils.dfs.getUri(),
GridmixTestUtils.dfs.getWorkingDirectory());
final Path root = new Path(workspace.getName()).makeQualified(
GridmixTestUtils.dfs.getUri(), GridmixTestUtils.dfs.getWorkingDirectory());
if (!workspace.exists()) {
assertTrue(workspace.mkdirs());
}
Configuration conf = null;
try {
ArrayList<String> argsList = new ArrayList<String>();
argsList.add("-D" + FilePool.GRIDMIX_MIN_FILE + "=0");
argsList.add("-D" + Gridmix.GRIDMIX_USR_RSV + "="
+ EchoUserResolver.class.getName());
if (jobCreatorName != null) {
argsList.add("-D" + JobCreator.GRIDMIX_JOB_TYPE + "=" + jobCreatorName);
}
// Set the config property gridmix.output.directory only if
      // defaultOutputPath is false. If defaultOutputPath is true, let
      // gridmix use its default output path, foo/gridmix/.
if (!defaultOutputPath) {
argsList.add("-D" + Gridmix.GRIDMIX_OUT_DIR + "=" + out);
}
argsList.add("-generate");
argsList.add(String.valueOf(GENDATA) + "m");
argsList.add(in.toString());
argsList.add("-"); // ignored by DebugGridmix
String[] argv = argsList.toArray(new String[argsList.size()]);
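      // The assembled argv mirrors a command line of the form:
      //   gridmix -D<key>=<value> ... -generate <GENDATA>m <input dir> -
      // where the trailing "-" takes the place of the trace file and is
      // ignored by DebugGridmix, which produces its own synthetic job stories.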
DebugGridmix client = new DebugGridmix();
conf = GridmixTestUtils.mrvl.getConfig();
CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
conf.setEnum(GridmixJobSubmissionPolicy.JOB_SUBMISSION_POLICY, policy);
conf.setBoolean(GridmixJob.GRIDMIX_USE_QUEUE_IN_TRACE, true);
UserGroupInformation ugi = UserGroupInformation.getLoginUser();
conf.set(MRJobConfig.USER_NAME, ugi.getUserName());
// allow synthetic users to create home directories
      GridmixTestUtils.dfs.mkdirs(root, new FsPermission((short) 0777));
      GridmixTestUtils.dfs.setPermission(root, new FsPermission((short) 0777));
int res = ToolRunner.run(conf, client, argv);
assertEquals("Client exited with nonzero status", 0, res);
client.checkMonitor();
} catch (Exception e) {
e.printStackTrace();
} finally {
in.getFileSystem(conf).delete(in, true);
out.getFileSystem(conf).delete(out, true);
root.getFileSystem(conf).delete(root, true);
}
}
}
| 16,457 | 41.637306 | 118 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestPseudoLocalFs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import static org.junit.Assert.*;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
/**
* Test the basic functionality of PseudoLocalFs
*/
public class TestPseudoLocalFs {
/**
* Test if a file on PseudoLocalFs of a specific size can be opened and read.
* Validate the size of the data read.
* Test the read methods of {@link PseudoLocalFs.RandomInputStream}.
* @throws Exception
*/
@Test
public void testPseudoLocalFsFileSize() throws Exception {
long fileSize = 10000;
Path path = PseudoLocalFs.generateFilePath("myPsedoFile", fileSize);
PseudoLocalFs pfs = new PseudoLocalFs();
pfs.create(path);
// Read 1 byte at a time and validate file size.
InputStream in = pfs.open(path, 0);
long totalSize = 0;
while (in.read() >= 0) {
++totalSize;
}
in.close();
assertEquals("File size mismatch with read().", fileSize, totalSize);
// Read data from PseudoLocalFs-based file into buffer to
// validate read(byte[]) and file size.
in = pfs.open(path, 0);
totalSize = 0;
byte[] b = new byte[1024];
int bytesRead = in.read(b);
while (bytesRead >= 0) {
totalSize += bytesRead;
bytesRead = in.read(b);
}
assertEquals("File size mismatch with read(byte[]).", fileSize, totalSize);
}
/**
* Validate if file status is obtained for correctly formed file paths on
* PseudoLocalFs and also verify if appropriate exception is thrown for
* invalid file paths.
* @param pfs Pseudo Local File System
* @param path file path for which getFileStatus() is to be called
* @param shouldSucceed <code>true</code> if getFileStatus() should succeed
* @throws IOException
*/
private void validateGetFileStatus(FileSystem pfs, Path path,
boolean shouldSucceed) throws IOException {
boolean expectedExceptionSeen = false;
FileStatus stat = null;
try {
stat = pfs.getFileStatus(path);
} catch(FileNotFoundException e) {
expectedExceptionSeen = true;
}
if (shouldSucceed) {
assertFalse("getFileStatus() has thrown Exception for valid file name "
+ path, expectedExceptionSeen);
assertNotNull("Missing file status for a valid file.", stat);
// validate fileSize
String[] parts = path.toUri().getPath().split("\\.");
long expectedFileSize = Long.valueOf(parts[parts.length - 1]);
assertEquals("Invalid file size.", expectedFileSize, stat.getLen());
} else {
assertTrue("getFileStatus() did not throw Exception for invalid file "
+ " name " + path, expectedExceptionSeen);
}
}
/**
* Validate if file creation succeeds for correctly formed file paths on
* PseudoLocalFs and also verify if appropriate exception is thrown for
* invalid file paths.
* @param pfs Pseudo Local File System
* @param path file path for which create() is to be called
* @param shouldSucceed <code>true</code> if create() should succeed
* @throws IOException
*/
private void validateCreate(FileSystem pfs, Path path,
boolean shouldSucceed) throws IOException {
boolean expectedExceptionSeen = false;
try {
pfs.create(path);
} catch(IOException e) {
expectedExceptionSeen = true;
}
if (shouldSucceed) {
assertFalse("create() has thrown Exception for valid file name "
+ path, expectedExceptionSeen);
} else {
assertTrue("create() did not throw Exception for invalid file name "
+ path, expectedExceptionSeen);
}
}
/**
* Validate if opening of file succeeds for correctly formed file paths on
* PseudoLocalFs and also verify if appropriate exception is thrown for
* invalid file paths.
* @param pfs Pseudo Local File System
* @param path file path for which open() is to be called
* @param shouldSucceed <code>true</code> if open() should succeed
* @throws IOException
*/
private void validateOpen(FileSystem pfs, Path path,
boolean shouldSucceed) throws IOException {
boolean expectedExceptionSeen = false;
try {
pfs.open(path);
} catch(IOException e) {
expectedExceptionSeen = true;
}
if (shouldSucceed) {
assertFalse("open() has thrown Exception for valid file name "
+ path, expectedExceptionSeen);
} else {
assertTrue("open() did not throw Exception for invalid file name "
+ path, expectedExceptionSeen);
}
}
/**
* Validate if exists() returns <code>true</code> for correctly formed file
* paths on PseudoLocalFs and returns <code>false</code> for improperly
* formed file paths.
* @param pfs Pseudo Local File System
* @param path file path for which exists() is to be called
* @param shouldSucceed expected return value of exists(<path>)
* @throws IOException
*/
private void validateExists(FileSystem pfs, Path path,
boolean shouldSucceed) throws IOException {
boolean ret = pfs.exists(path);
if (shouldSucceed) {
assertTrue("exists() returned false for valid file name " + path, ret);
} else {
assertFalse("exists() returned true for invalid file name " + path, ret);
}
}
/**
* Test Pseudo Local File System methods like getFileStatus(), create(),
   * open() and exists() for both valid and invalid file paths.
* @throws IOException
*/
@Test
public void testPseudoLocalFsFileNames() throws IOException {
PseudoLocalFs pfs = new PseudoLocalFs();
Configuration conf = new Configuration();
conf.setClass("fs.pseudo.impl", PseudoLocalFs.class, FileSystem.class);
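    // Registering the implementation class under "fs.pseudo.impl" lets
    // Path.getFileSystem(conf) resolve the "pseudo" URI scheme below.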
Path path = new Path("pseudo:///myPsedoFile.1234");
FileSystem testFs = path.getFileSystem(conf);
assertEquals("Failed to obtain a pseudo local file system object from path",
pfs.getUri().getScheme(), testFs.getUri().getScheme());
// Validate PseudoLocalFS operations on URI of some other file system
path = new Path("file:///myPsedoFile.12345");
validateGetFileStatus(pfs, path, false);
validateCreate(pfs, path, false);
validateOpen(pfs, path, false);
validateExists(pfs, path, false);
path = new Path("pseudo:///myPsedoFile");//.<fileSize> missing
validateGetFileStatus(pfs, path, false);
validateCreate(pfs, path, false);
validateOpen(pfs, path, false);
validateExists(pfs, path, false);
// thing after final '.' is not a number
path = new Path("pseudo:///myPsedoFile.txt");
validateGetFileStatus(pfs, path, false);
validateCreate(pfs, path, false);
validateOpen(pfs, path, false);
validateExists(pfs, path, false);
// Generate valid file name(relative path) and validate operations on it
long fileSize = 231456;
path = PseudoLocalFs.generateFilePath("my.Psedo.File", fileSize);
// Validate the above generateFilePath()
assertEquals("generateFilePath() failed.", fileSize,
pfs.validateFileNameFormat(path));
validateGetFileStatus(pfs, path, true);
validateCreate(pfs, path, true);
validateOpen(pfs, path, true);
validateExists(pfs, path, true);
// Validate operations on valid qualified path
path = new Path("myPsedoFile.1237");
path = path.makeQualified(pfs);
validateGetFileStatus(pfs, path, true);
validateCreate(pfs, path, true);
validateOpen(pfs, path, true);
validateExists(pfs, path, true);
}
}
| 8,524 | 35.431624 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DebugJobFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.rumen.JobStory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.concurrent.CountDownLatch;
/**
* Component generating random job traces for testing on a single node.
*/
public class DebugJobFactory {
interface Debuggable {
ArrayList<JobStory> getSubmitted();
}
public static JobFactory<?> getFactory(
JobSubmitter submitter, Path scratch, int numJobs, Configuration conf,
CountDownLatch startFlag, UserResolver resolver) throws IOException {
GridmixJobSubmissionPolicy policy = GridmixJobSubmissionPolicy.getPolicy(
conf, GridmixJobSubmissionPolicy.STRESS);
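    // STRESS is used as the default submission policy when none is configured.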
if (policy == GridmixJobSubmissionPolicy.REPLAY) {
return new DebugReplayJobFactory(
submitter, scratch, numJobs, conf, startFlag, resolver);
} else if (policy == GridmixJobSubmissionPolicy.STRESS) {
return new DebugStressJobFactory(
submitter, scratch, numJobs, conf, startFlag, resolver);
} else if (policy == GridmixJobSubmissionPolicy.SERIAL) {
return new DebugSerialJobFactory(
submitter, scratch, numJobs, conf, startFlag, resolver);
}
return null;
}
static class DebugReplayJobFactory extends ReplayJobFactory
implements Debuggable {
public DebugReplayJobFactory(
JobSubmitter submitter, Path scratch, int numJobs, Configuration conf,
CountDownLatch startFlag, UserResolver resolver) throws IOException {
super(
submitter, new DebugJobProducer(numJobs, conf), scratch, conf,
startFlag, resolver);
}
@Override
public ArrayList<JobStory> getSubmitted() {
return ((DebugJobProducer) jobProducer).submitted;
}
}
static class DebugSerialJobFactory extends SerialJobFactory
implements Debuggable {
public DebugSerialJobFactory(
JobSubmitter submitter, Path scratch, int numJobs, Configuration conf,
CountDownLatch startFlag, UserResolver resolver) throws IOException {
super(
submitter, new DebugJobProducer(numJobs, conf), scratch, conf,
startFlag, resolver);
}
@Override
public ArrayList<JobStory> getSubmitted() {
return ((DebugJobProducer) jobProducer).submitted;
}
}
static class DebugStressJobFactory extends StressJobFactory
implements Debuggable {
public DebugStressJobFactory(
JobSubmitter submitter, Path scratch, int numJobs, Configuration conf,
CountDownLatch startFlag, UserResolver resolver) throws IOException {
super(
submitter, new DebugJobProducer(numJobs, conf), scratch, conf,
startFlag, resolver);
}
@Override
public ArrayList<JobStory> getSubmitted() {
return ((DebugJobProducer) jobProducer).submitted;
}
}
}
| 3,688 | 33.476636 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Formatter;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.rumen.JobStory;
import static org.apache.hadoop.tools.rumen.datatypes.util.MapReduceJobPropertiesParser.extractMaxHeapOpts;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* Synthetic job generated from a trace description.
*/
abstract class GridmixJob implements Callable<Job>, Delayed {
// Gridmix job name format is GRIDMIX<6 digit sequence number>
public static final String JOB_NAME_PREFIX = "GRIDMIX";
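  // e.g. the job with sequence number 7 is named "GRIDMIX000007"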
public static final Log LOG = LogFactory.getLog(GridmixJob.class);
private static final ThreadLocal<Formatter> nameFormat =
new ThreadLocal<Formatter>() {
@Override
protected Formatter initialValue() {
final StringBuilder sb =
new StringBuilder(JOB_NAME_PREFIX.length() + 6);
sb.append(JOB_NAME_PREFIX);
return new Formatter(sb);
}
};
private boolean submitted;
protected final int seq;
protected final Path outdir;
protected final Job job;
protected final JobStory jobdesc;
protected final UserGroupInformation ugi;
protected final long submissionTimeNanos;
private static final ConcurrentHashMap<Integer,List<InputSplit>> descCache =
new ConcurrentHashMap<Integer,List<InputSplit>>();
protected static final String GRIDMIX_JOB_SEQ = "gridmix.job.seq";
protected static final String GRIDMIX_USE_QUEUE_IN_TRACE =
"gridmix.job-submission.use-queue-in-trace";
protected static final String GRIDMIX_DEFAULT_QUEUE =
"gridmix.job-submission.default-queue";
// configuration key to enable/disable High-Ram feature emulation
static final String GRIDMIX_HIGHRAM_EMULATION_ENABLE =
"gridmix.highram-emulation.enable";
// configuration key to enable/disable task jvm options
static final String GRIDMIX_TASK_JVM_OPTIONS_ENABLE =
"gridmix.task.jvm-options.enable";
private static void setJobQueue(Job job, String queue) {
if (queue != null) {
job.getConfiguration().set(MRJobConfig.QUEUE_NAME, queue);
}
}
public GridmixJob(final Configuration conf, long submissionMillis,
final JobStory jobdesc, Path outRoot, UserGroupInformation ugi,
final int seq) throws IOException {
this.ugi = ugi;
this.jobdesc = jobdesc;
this.seq = seq;
((StringBuilder)nameFormat.get().out()).setLength(JOB_NAME_PREFIX.length());
try {
job = this.ugi.doAs(new PrivilegedExceptionAction<Job>() {
public Job run() throws IOException {
String jobId = null == jobdesc.getJobID()
? "<unknown>"
: jobdesc.getJobID().toString();
Job ret = Job.getInstance(conf, nameFormat.get().format("%06d", seq)
.toString());
ret.getConfiguration().setInt(GRIDMIX_JOB_SEQ, seq);
ret.getConfiguration().set(Gridmix.ORIGINAL_JOB_ID, jobId);
ret.getConfiguration().set(Gridmix.ORIGINAL_JOB_NAME,
jobdesc.getName());
if (conf.getBoolean(GRIDMIX_USE_QUEUE_IN_TRACE, false)) {
setJobQueue(ret, jobdesc.getQueueName());
} else {
setJobQueue(ret, conf.get(GRIDMIX_DEFAULT_QUEUE));
}
// check if the job can emulate compression
if (canEmulateCompression()) {
// set the compression related configs if compression emulation is
// enabled
if (CompressionEmulationUtil.isCompressionEmulationEnabled(conf)) {
CompressionEmulationUtil.configureCompressionEmulation(
jobdesc.getJobConf(), ret.getConfiguration());
}
}
// configure high ram properties if enabled
if (conf.getBoolean(GRIDMIX_HIGHRAM_EMULATION_ENABLE, true)) {
configureHighRamProperties(jobdesc.getJobConf(),
ret.getConfiguration());
}
// configure task jvm options if enabled
// this knob can be turned off if there is a mismatch between the
// target (simulation) cluster and the original cluster. Such a
// mismatch can result in job failures (due to memory issues) on the
// target (simulated) cluster.
//
// TODO If configured, scale the original task's JVM (heap related)
// options to suit the target (simulation) cluster
if (conf.getBoolean(GRIDMIX_TASK_JVM_OPTIONS_ENABLE, true)) {
configureTaskJVMOptions(jobdesc.getJobConf(),
ret.getConfiguration());
}
return ret;
}
});
} catch (InterruptedException e) {
throw new IOException(e);
}
submissionTimeNanos = TimeUnit.NANOSECONDS.convert(
submissionMillis, TimeUnit.MILLISECONDS);
outdir = new Path(outRoot, "" + seq);
}
@SuppressWarnings("deprecation")
protected static void configureTaskJVMOptions(Configuration originalJobConf,
Configuration simulatedJobConf){
// Get the heap related java opts used for the original job and set the
// same for the simulated job.
    // set the generic task heap options
configureTaskJVMMaxHeapOptions(originalJobConf, simulatedJobConf,
JobConf.MAPRED_TASK_JAVA_OPTS);
// set map task heap options
configureTaskJVMMaxHeapOptions(originalJobConf, simulatedJobConf,
MRJobConfig.MAP_JAVA_OPTS);
// set reduce task heap options
configureTaskJVMMaxHeapOptions(originalJobConf, simulatedJobConf,
MRJobConfig.REDUCE_JAVA_OPTS);
}
// Configures the task's max heap options using the specified key
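  // Illustrative example (hypothetical values): if the original job used
  // "-Xmx1024m -verbose:gc" and the simulated configuration already had
  // "-Xmx200m -server", the resulting value keeps "-server", drops
  // "-Xmx200m" and carries over "-Xmx1024m" from the original job.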
private static void configureTaskJVMMaxHeapOptions(Configuration srcConf,
Configuration destConf,
String key) {
String srcHeapOpts = srcConf.get(key);
if (srcHeapOpts != null) {
List<String> srcMaxOptsList = new ArrayList<String>();
// extract the max heap options and ignore the rest
extractMaxHeapOpts(srcHeapOpts, srcMaxOptsList,
new ArrayList<String>());
if (srcMaxOptsList.size() > 0) {
List<String> destOtherOptsList = new ArrayList<String>();
// extract the other heap options and ignore the max options in the
// destination configuration
String destHeapOpts = destConf.get(key);
if (destHeapOpts != null) {
extractMaxHeapOpts(destHeapOpts, new ArrayList<String>(),
destOtherOptsList);
}
        // the destination configuration might already have max heap opts set;
        // drop those and replace them with the max heap opts taken from the
        // original (source) configuration
StringBuilder newHeapOpts = new StringBuilder();
for (String otherOpt : destOtherOptsList) {
newHeapOpts.append(otherOpt).append(" ");
}
for (String opts : srcMaxOptsList) {
newHeapOpts.append(opts).append(" ");
}
// set the final heap opts
destConf.set(key, newHeapOpts.toString().trim());
}
}
}
// Scales the desired job-level configuration parameter. This API makes sure
// that the ratio of the job level configuration parameter to the cluster
// level configuration parameter is maintained in the simulated run. Hence
// the values are scaled from the original cluster's configuration to the
// simulated cluster's configuration for higher emulation accuracy.
// This kind of scaling is useful for memory parameters.
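  // Illustrative example (hypothetical values): with an original cluster
  // default of 1024 MB, an original job value of 2048 MB (scale factor 2.0)
  // and a simulated cluster default of 1536 MB, the simulated job value is
  // set to 3072 MB.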
private static void scaleConfigParameter(Configuration sourceConf,
Configuration destConf, String clusterValueKey,
String jobValueKey, long defaultValue) {
long simulatedClusterDefaultValue =
destConf.getLong(clusterValueKey, defaultValue);
long originalClusterDefaultValue =
sourceConf.getLong(clusterValueKey, defaultValue);
long originalJobValue =
sourceConf.getLong(jobValueKey, defaultValue);
double scaleFactor = (double)originalJobValue/originalClusterDefaultValue;
long simulatedJobValue = (long)(scaleFactor * simulatedClusterDefaultValue);
if (LOG.isDebugEnabled()) {
LOG.debug("For the job configuration parameter '" + jobValueKey
+ "' and the cluster configuration parameter '"
+ clusterValueKey + "', the original job's configuration value"
+ " is scaled from '" + originalJobValue + "' to '"
+ simulatedJobValue + "' using the default (unit) value of "
+ "'" + originalClusterDefaultValue + "' for the original "
+ " cluster and '" + simulatedClusterDefaultValue + "' for the"
+ " simulated cluster.");
}
destConf.setLong(jobValueKey, simulatedJobValue);
}
  // Checks that the scaled value of the original job's memory parameter does
  // not exceed the upper limit configured for the simulated cluster. Returns
  // true if such a limit was defined and the check was performed.
@SuppressWarnings("deprecation")
private static boolean checkMemoryUpperLimits(String jobKey, String limitKey,
Configuration conf,
boolean convertLimitToMB) {
if (conf.get(limitKey) != null) {
long limit = conf.getLong(limitKey, JobConf.DISABLED_MEMORY_LIMIT);
// scale only if the max memory limit is set.
if (limit >= 0) {
if (convertLimitToMB) {
limit /= (1024 * 1024); //Converting to MB
}
long scaledConfigValue =
conf.getLong(jobKey, JobConf.DISABLED_MEMORY_LIMIT);
// check now
if (scaledConfigValue > limit) {
throw new RuntimeException("Simulated job's configuration"
+ " parameter '" + jobKey + "' got scaled to a value '"
+ scaledConfigValue + "' which exceeds the upper limit of '"
+ limit + "' defined for the simulated cluster by the key '"
+ limitKey + "'. To disable High-Ram feature emulation, set '"
+ GRIDMIX_HIGHRAM_EMULATION_ENABLE + "' to 'false'.");
}
return true;
}
}
return false;
}
// Check if the parameter scaling does not exceed the cluster limits.
@SuppressWarnings("deprecation")
private static void validateTaskMemoryLimits(Configuration conf,
String jobKey, String clusterMaxKey) {
if (!checkMemoryUpperLimits(jobKey,
JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY, conf, true)) {
checkMemoryUpperLimits(jobKey, clusterMaxKey, conf, false);
}
}
/**
* Sets the high ram job properties in the simulated job's configuration.
*/
@SuppressWarnings("deprecation")
static void configureHighRamProperties(Configuration sourceConf,
Configuration destConf) {
// set the memory per map task
scaleConfigParameter(sourceConf, destConf,
MRConfig.MAPMEMORY_MB, MRJobConfig.MAP_MEMORY_MB,
MRJobConfig.DEFAULT_MAP_MEMORY_MB);
// validate and fail early
validateTaskMemoryLimits(destConf, MRJobConfig.MAP_MEMORY_MB,
JTConfig.JT_MAX_MAPMEMORY_MB);
// set the memory per reduce task
scaleConfigParameter(sourceConf, destConf,
MRConfig.REDUCEMEMORY_MB, MRJobConfig.REDUCE_MEMORY_MB,
MRJobConfig.DEFAULT_REDUCE_MEMORY_MB);
// validate and fail early
validateTaskMemoryLimits(destConf, MRJobConfig.REDUCE_MEMORY_MB,
JTConfig.JT_MAX_REDUCEMEMORY_MB);
}
/**
* Indicates whether this {@link GridmixJob} supports compression emulation.
*/
protected abstract boolean canEmulateCompression();
protected GridmixJob(final Configuration conf, long submissionMillis,
final String name) throws IOException {
submissionTimeNanos = TimeUnit.NANOSECONDS.convert(
submissionMillis, TimeUnit.MILLISECONDS);
jobdesc = null;
outdir = null;
seq = -1;
ugi = UserGroupInformation.getCurrentUser();
try {
job = this.ugi.doAs(new PrivilegedExceptionAction<Job>() {
public Job run() throws IOException {
Job ret = Job.getInstance(conf, name);
ret.getConfiguration().setInt(GRIDMIX_JOB_SEQ, seq);
setJobQueue(ret, conf.get(GRIDMIX_DEFAULT_QUEUE));
return ret;
}
});
} catch (InterruptedException e) {
throw new IOException(e);
}
}
public UserGroupInformation getUgi() {
return ugi;
}
public String toString() {
return job.getJobName();
}
public long getDelay(TimeUnit unit) {
return unit.convert(submissionTimeNanos - System.nanoTime(),
TimeUnit.NANOSECONDS);
}
@Override
public int compareTo(Delayed other) {
if (this == other) {
return 0;
}
if (other instanceof GridmixJob) {
final long otherNanos = ((GridmixJob)other).submissionTimeNanos;
if (otherNanos < submissionTimeNanos) {
return 1;
}
if (otherNanos > submissionTimeNanos) {
return -1;
}
return id() - ((GridmixJob)other).id();
}
final long diff =
getDelay(TimeUnit.NANOSECONDS) - other.getDelay(TimeUnit.NANOSECONDS);
return 0 == diff ? 0 : (diff > 0 ? 1 : -1);
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
// not possible unless job is cloned; all jobs should be unique
return other instanceof GridmixJob && id() == ((GridmixJob)other).id();
}
@Override
public int hashCode() {
return id();
}
int id() {
return seq;
}
Job getJob() {
return job;
}
JobStory getJobDesc() {
return jobdesc;
}
void setSubmitted() {
submitted = true;
}
boolean isSubmitted() {
return submitted;
}
static void pushDescription(int seq, List<InputSplit> splits) {
if (null != descCache.putIfAbsent(seq, splits)) {
throw new IllegalArgumentException("Description exists for id " + seq);
}
}
static List<InputSplit> pullDescription(JobContext jobCtxt) {
return pullDescription(GridmixJob.getJobSeqId(jobCtxt));
}
static List<InputSplit> pullDescription(int seq) {
return descCache.remove(seq);
}
static void clearAll() {
descCache.clear();
}
void buildSplits(FilePool inputDir) throws IOException {
}
static int getJobSeqId(JobContext job) {
return job.getConfiguration().getInt(GRIDMIX_JOB_SEQ,-1);
}
public static class DraftPartitioner<V> extends Partitioner<GridmixKey,V> {
public int getPartition(GridmixKey key, V value, int numReduceTasks) {
return key.getPartition();
}
}
public static class SpecGroupingComparator
implements RawComparator<GridmixKey> {
private final DataInputBuffer di = new DataInputBuffer();
private final byte[] reset = di.getData();
@Override
public int compare(GridmixKey g1, GridmixKey g2) {
final byte t1 = g1.getType();
final byte t2 = g2.getType();
if (t1 == GridmixKey.REDUCE_SPEC ||
t2 == GridmixKey.REDUCE_SPEC) {
return t1 - t2;
}
assert t1 == GridmixKey.DATA;
assert t2 == GridmixKey.DATA;
return g1.compareTo(g2);
}
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
try {
final int ret;
di.reset(b1, s1, l1);
final int x1 = WritableUtils.readVInt(di);
di.reset(b2, s2, l2);
final int x2 = WritableUtils.readVInt(di);
final int t1 = b1[s1 + x1];
final int t2 = b2[s2 + x2];
if (t1 == GridmixKey.REDUCE_SPEC ||
t2 == GridmixKey.REDUCE_SPEC) {
ret = t1 - t2;
} else {
assert t1 == GridmixKey.DATA;
assert t2 == GridmixKey.DATA;
ret =
WritableComparator.compareBytes(b1, s1, x1, b2, s2, x2);
}
di.reset(reset, 0, 0);
return ret;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
static class RawBytesOutputFormat<K>
extends FileOutputFormat<K,GridmixRecord> {
@Override
public RecordWriter<K,GridmixRecord> getRecordWriter(
TaskAttemptContext job) throws IOException {
Path file = getDefaultWorkFile(job, "");
final DataOutputStream fileOut;
fileOut =
new DataOutputStream(CompressionEmulationUtil
.getPossiblyCompressedOutputStream(file, job.getConfiguration()));
return new RecordWriter<K,GridmixRecord>() {
@Override
public void write(K ignored, GridmixRecord value)
throws IOException {
// Let the Gridmix record fill itself.
value.write(fileOut);
}
@Override
public void close(TaskAttemptContext ctxt) throws IOException {
fileOut.close();
}
};
}
}
}
| 19,433 | 35.87666 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/PseudoLocalFs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.util.Random;
import java.net.URI;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.util.Progressable;
/**
* Pseudo local file system that generates random data for any file on the fly
 * instead of storing files on disk, so opening the same file multiple times
 * will not return the same content. There are no directories in this file
 * system other than the root, and all files are under the root, i.e. "/".
 * All file URIs on the pseudo local file system should be of the format <code>
* pseudo:///<name>.<fileSize></code> where name is a unique name
* and <fileSize> is a number representing the size of the file in bytes.
*/
class PseudoLocalFs extends FileSystem {
Path home;
/**
* The creation time and modification time of all files in
* {@link PseudoLocalFs} is same.
*/
private static final long TIME = System.currentTimeMillis();
private static final String HOME_DIR = "/";
private static final long BLOCK_SIZE = 4 * 1024 * 1024L; // 4 MB
private static final int DEFAULT_BUFFER_SIZE = 1024 * 1024; // 1MB
static final URI NAME = URI.create("pseudo:///");
PseudoLocalFs() {
this(new Path(HOME_DIR));
}
PseudoLocalFs(Path home) {
super();
this.home = home;
}
@Override
public URI getUri() {
return NAME;
}
@Override
public Path getHomeDirectory() {
return home;
}
@Override
public Path getWorkingDirectory() {
return getHomeDirectory();
}
/**
* Generates a valid pseudo local file path from the given <code>fileId</code>
* and <code>fileSize</code>.
* @param fileId unique file id string
* @param fileSize file size
* @return the generated relative path
*/
static Path generateFilePath(String fileId, long fileSize) {
return new Path(fileId + "." + fileSize);
}
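  // For example, generateFilePath("data", 2048) yields the relative path
  // "data.2048"; the trailing ".2048" encodes the pseudo file's size in bytes.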
/**
* Creating a pseudo local file is nothing but validating the file path.
* Actual data of the file is generated on the fly when client tries to open
* the file for reading.
* @param path file path to be created
*/
@Override
public FSDataOutputStream create(Path path) throws IOException {
try {
validateFileNameFormat(path);
} catch (FileNotFoundException e) {
throw new IOException("File creation failed for " + path);
}
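    // No output stream is returned; the file's contents are generated on the
    // fly when it is opened for reading.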
return null;
}
/**
* Validate if the path provided is of expected format of Pseudo Local File
* System based files.
* @param path file path
* @return the file size
* @throws FileNotFoundException
*/
long validateFileNameFormat(Path path) throws FileNotFoundException {
path = path.makeQualified(this);
boolean valid = true;
long fileSize = 0;
if (!path.toUri().getScheme().equals(getUri().getScheme())) {
valid = false;
} else {
String[] parts = path.toUri().getPath().split("\\.");
try {
fileSize = Long.parseLong(parts[parts.length - 1]);
valid = (fileSize >= 0);
} catch (NumberFormatException e) {
valid = false;
}
}
if (!valid) {
throw new FileNotFoundException("File " + path
+ " does not exist in pseudo local file system");
}
return fileSize;
}
/**
   * @see #create(Path) for details
*/
@Override
public FSDataInputStream open(Path path, int bufferSize) throws IOException {
long fileSize = validateFileNameFormat(path);
InputStream in = new RandomInputStream(fileSize, bufferSize);
return new FSDataInputStream(in);
}
/**
   * @see #create(Path) for details
*/
@Override
public FSDataInputStream open(Path path) throws IOException {
return open(path, DEFAULT_BUFFER_SIZE);
}
@Override
public FileStatus getFileStatus(Path path) throws IOException {
long fileSize = validateFileNameFormat(path);
return new FileStatus(fileSize, false, 1, BLOCK_SIZE, TIME, path);
}
@Override
public boolean exists(Path path) {
try{
validateFileNameFormat(path);
} catch (FileNotFoundException e) {
return false;
}
return true;
}
@Override
public FSDataOutputStream create(Path path, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return create(path);
}
@Override
public FileStatus[] listStatus(Path path) throws FileNotFoundException,
IOException {
return new FileStatus[] {getFileStatus(path)};
}
/**
* Input Stream that generates specified number of random bytes.
*/
static class RandomInputStream extends InputStream
implements Seekable, PositionedReadable {
private final Random r = new Random();
private BytesWritable val = null;
private int positionInVal = 0;// current position in the buffer 'val'
private long totalSize = 0;// total number of random bytes to be generated
private long curPos = 0;// current position in this stream
/**
* @param size total number of random bytes to be generated in this stream
* @param bufferSize the buffer size. An internal buffer array of length
* <code>bufferSize</code> is created. If <code>bufferSize</code> is not a
* positive number, then a default value of 1MB is used.
*/
RandomInputStream(long size, int bufferSize) {
totalSize = size;
if (bufferSize <= 0) {
bufferSize = DEFAULT_BUFFER_SIZE;
}
val = new BytesWritable(new byte[bufferSize]);
}
@Override
public int read() throws IOException {
byte[] b = new byte[1];
if (curPos < totalSize) {
if (positionInVal < val.getLength()) {// use buffered byte
b[0] = val.getBytes()[positionInVal++];
++curPos;
} else {// generate data
int num = read(b);
if (num < 0) {
return num;
}
}
} else {
return -1;
}
return b[0];
}
@Override
public int read(byte[] bytes) throws IOException {
return read(bytes, 0, bytes.length);
}
@Override
public int read(byte[] bytes, int off, int len) throws IOException {
if (curPos == totalSize) {
return -1;// EOF
}
int numBytes = len;
if (numBytes > (totalSize - curPos)) {// position in file is close to EOF
numBytes = (int)(totalSize - curPos);
}
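      // Refill the internal random buffer when it cannot satisfy the request
      // in full; this assumes numBytes never exceeds the buffer length, i.e.
      // callers do not read more than bufferSize bytes at once.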
if (numBytes > (val.getLength() - positionInVal)) {
// need to generate data into val
r.nextBytes(val.getBytes());
positionInVal = 0;
}
System.arraycopy(val.getBytes(), positionInVal, bytes, off, numBytes);
curPos += numBytes;
positionInVal += numBytes;
return numBytes;
}
@Override
public int available() {
return (int)(val.getLength() - positionInVal);
}
@Override
public int read(long position, byte[] buffer, int offset, int length)
throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void readFully(long position, byte[] buffer) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void readFully(long position, byte[] buffer, int offset, int length)
throws IOException {
throw new UnsupportedOperationException();
}
/**
* Get the current position in this stream/pseudo-file
* @return the position in this stream/pseudo-file
* @throws IOException
*/
@Override
public long getPos() throws IOException {
return curPos;
}
@Override
public void seek(long pos) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
throw new UnsupportedOperationException();
}
}
@Override
public FSDataOutputStream append(Path path, int bufferSize,
Progressable progress) throws IOException {
throw new UnsupportedOperationException("Append is not supported"
+ " in pseudo local file system.");
}
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
throw new UnsupportedOperationException("Mkdirs is not supported"
+ " in pseudo local file system.");
}
@Override
public boolean rename(Path src, Path dst) throws IOException {
throw new UnsupportedOperationException("Rename is not supported"
+ " in pseudo local file system.");
}
@Override
public boolean delete(Path path, boolean recursive) {
throw new UnsupportedOperationException("File deletion is not supported "
+ "in pseudo local file system.");
}
@Override
public void setWorkingDirectory(Path newDir) {
throw new UnsupportedOperationException("SetWorkingDirectory "
+ "is not supported in pseudo local file system.");
}
}
| 10,131 | 29.426426 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Statistics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.gridmix.Gridmix.Component;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.rumen.JobStory;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
/**
* Component collecting the stats required by other components
* to make decisions.
 * A single collector thread collects the stats (currently cluster stats)
 * and caches them internally.
 * Components interested in these stats need to register themselves and will be
 * notified either on every job completion event or at a fixed time interval.
*/
public class Statistics implements Component<Statistics.JobStats> {
public static final Log LOG = LogFactory.getLog(Statistics.class);
private final StatCollector statistics = new StatCollector();
private JobClient cluster;
//List of cluster status listeners.
private final List<StatListener<ClusterStats>> clusterStatlisteners =
new CopyOnWriteArrayList<StatListener<ClusterStats>>();
//List of job status listeners.
private final List<StatListener<JobStats>> jobStatListeners =
new CopyOnWriteArrayList<StatListener<JobStats>>();
// A map of job-sequence-id to job-stats of submitted jobs
private static final Map<Integer, JobStats> submittedJobsMap =
new ConcurrentHashMap<Integer, JobStats>();
// total number of map tasks submitted
private static volatile int numMapsSubmitted = 0;
// total number of reduce tasks submitted
private static volatile int numReducesSubmitted = 0;
private int completedJobsInCurrentInterval = 0;
private final int jtPollingInterval;
private volatile boolean shutdown = false;
private final int maxJobCompletedInInterval;
private static final String MAX_JOBS_COMPLETED_IN_POLL_INTERVAL_KEY =
"gridmix.max-jobs-completed-in-poll-interval";
private final ReentrantLock lock = new ReentrantLock();
private final Condition jobCompleted = lock.newCondition();
private final CountDownLatch startFlag;
public Statistics(
final Configuration conf, int pollingInterval, CountDownLatch startFlag)
throws IOException, InterruptedException {
UserGroupInformation ugi = UserGroupInformation.getLoginUser();
this.cluster = ugi.doAs(new PrivilegedExceptionAction<JobClient>() {
public JobClient run() throws IOException {
return new JobClient(new JobConf(conf));
}
});
this.jtPollingInterval = pollingInterval;
maxJobCompletedInInterval = conf.getInt(
MAX_JOBS_COMPLETED_IN_POLL_INTERVAL_KEY, 1);
this.startFlag = startFlag;
}
/**
   * Generates the job stats for the given job and its trace description.
*/
public static JobStats generateJobStats(Job job, JobStory jobdesc) {
int seq = GridmixJob.getJobSeqId(job);
// bail out if job description is missing for a job to be simulated
if (seq >= 0 && jobdesc == null) {
throw new IllegalArgumentException("JobStory not available for job "
+ job.getJobID());
}
int maps = -1;
int reds = -1;
if (jobdesc != null) {
// Note that the ZombieJob will return a >= 0 value
maps = jobdesc.getNumberMaps();
reds = jobdesc.getNumberReduces();
}
return new JobStats(maps, reds, job);
}
private static void addToNumMapsSubmitted(int numMaps) {
numMapsSubmitted += numMaps;
}
private static void addToNumReducesSubmitted(int numReduces) {
numReducesSubmitted += numReduces;
}
private static void subtractFromNumMapsSubmitted(int numMaps) {
numMapsSubmitted -= numMaps;
}
private static void subtractFromNumReducesSubmitted(int numReduces) {
numReducesSubmitted -= numReduces;
}
/**
* Add a submitted job for monitoring.
*/
public void addJobStats(JobStats stats) {
int seq = GridmixJob.getJobSeqId(stats.getJob());
if (seq < 0) {
LOG.info("Not tracking job " + stats.getJob().getJobName()
+ " as seq id is less than zero: " + seq);
return;
}
submittedJobsMap.put(seq, stats);
addToNumMapsSubmitted(stats.getNoOfMaps());
addToNumReducesSubmitted(stats.getNoOfReds());
}
/**
* Used by JobMonitor to add the completed job.
*/
@Override
public void add(Statistics.JobStats job) {
    // The job monitor also notifies this component for the initial
    // data-generation job. Ignore that notification, since stats collection
    // only begins once the input has been generated.
if (!statistics.isAlive()) {
return;
}
JobStats stat = submittedJobsMap.remove(GridmixJob.getJobSeqId(job.getJob()));
    // stat should never be null here; log an error and bail out defensively
    // if the entry is unexpectedly missing
if (stat == null) {
LOG.error("[Statistics] Missing entry for job "
+ job.getJob().getJobID());
return;
}
// update the total number of submitted map/reduce task count
subtractFromNumMapsSubmitted(stat.getNoOfMaps());
subtractFromNumReducesSubmitted(stat.getNoOfReds());
completedJobsInCurrentInterval++;
//check if we have reached the maximum level of job completions.
if (completedJobsInCurrentInterval >= maxJobCompletedInInterval) {
if (LOG.isDebugEnabled()) {
LOG.debug(
" Reached maximum limit of jobs in a polling interval " +
completedJobsInCurrentInterval);
}
completedJobsInCurrentInterval = 0;
lock.lock();
try {
//Job is completed notify all the listeners.
for (StatListener<JobStats> l : jobStatListeners) {
l.update(stat);
}
this.jobCompleted.signalAll();
} finally {
lock.unlock();
}
}
}
  // TODO: There are only two types of listeners as of now. If the number of
  // listener types grows, we should move to a map-based model.
public void addClusterStatsObservers(StatListener<ClusterStats> listener) {
clusterStatlisteners.add(listener);
}
public void addJobStatsListeners(StatListener<JobStats> listener) {
this.jobStatListeners.add(listener);
}
/**
* Attempt to start the service.
*/
@Override
public void start() {
statistics.start();
}
private class StatCollector extends Thread {
StatCollector() {
super("StatsCollectorThread");
}
public void run() {
try {
startFlag.await();
if (Thread.currentThread().isInterrupted()) {
return;
}
} catch (InterruptedException ie) {
LOG.error(
"Statistics Error while waiting for other threads to get ready ", ie);
return;
}
while (!shutdown) {
lock.lock();
try {
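          // Wake up either when add() signals that enough jobs completed in
          // this interval or when the polling interval elapses, whichever
          // happens first.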
jobCompleted.await(jtPollingInterval, TimeUnit.MILLISECONDS);
} catch (InterruptedException ie) {
if (!shutdown) {
LOG.error("Statistics interrupt while waiting for completion of "
+ "a job.", ie);
}
return;
} finally {
lock.unlock();
}
        // Fetch cluster data only if required, i.e. only if there are
        // cluster-stats listeners registered.
if (clusterStatlisteners.size() > 0) {
try {
ClusterStatus clusterStatus = cluster.getClusterStatus();
updateAndNotifyClusterStatsListeners(clusterStatus);
} catch (IOException e) {
LOG.error(
"Statistics io exception while polling JT ", e);
return;
}
}
}
}
private void updateAndNotifyClusterStatsListeners(
ClusterStatus clusterStatus) {
ClusterStats stats = ClusterStats.getClusterStats();
stats.setClusterMetric(clusterStatus);
for (StatListener<ClusterStats> listener : clusterStatlisteners) {
listener.update(stats);
}
}
}
/**
* Wait until the service completes. It is assumed that either a
* {@link #shutdown} or {@link #abort} has been requested.
*/
@Override
public void join(long millis) throws InterruptedException {
statistics.join(millis);
}
@Override
public void shutdown() {
shutdown = true;
submittedJobsMap.clear();
clusterStatlisteners.clear();
jobStatListeners.clear();
statistics.interrupt();
}
@Override
public void abort() {
shutdown = true;
submittedJobsMap.clear();
clusterStatlisteners.clear();
jobStatListeners.clear();
statistics.interrupt();
}
/**
* Class to encapsulate the JobStats information.
   * Currently we just need information about completed jobs.
   * TODO: In the future we may need to extend this to carry more information.
*/
static class JobStats {
private final int noOfMaps;
private final int noOfReds;
private JobStatus currentStatus;
private final Job job;
public JobStats(int noOfMaps,int numOfReds, Job job){
this.job = job;
this.noOfMaps = noOfMaps;
this.noOfReds = numOfReds;
}
public int getNoOfMaps() {
return noOfMaps;
}
public int getNoOfReds() {
return noOfReds;
}
/**
     * Returns the job.
     * Note: do not use job.getJobID() as it returns null in 20.1xx;
     * use GridmixJob.getJobSeqId(job) instead.
     * @return the job
*/
public Job getJob() {
return job;
}
/**
* Update the job statistics.
*/
public synchronized void updateJobStatus(JobStatus status) {
this.currentStatus = status;
}
/**
* Get the current job status.
*/
public synchronized JobStatus getJobStatus() {
return currentStatus;
}
}
static class ClusterStats {
private ClusterStatus status = null;
private static ClusterStats stats = new ClusterStats();
private ClusterStats() {
}
/**
* @return stats
*/
static ClusterStats getClusterStats() {
return stats;
}
/**
* @param metrics
*/
void setClusterMetric(ClusterStatus metrics) {
this.status = metrics;
}
/**
* @return metrics
*/
public ClusterStatus getStatus() {
return status;
}
int getNumRunningJob() {
return submittedJobsMap.size();
}
/**
     * @return the stats of the currently running/waiting jobs
*/
static Collection<JobStats> getRunningJobStats() {
return submittedJobsMap.values();
}
/**
* Returns the total number of submitted map tasks
*/
static int getSubmittedMapTasks() {
return numMapsSubmitted;
}
/**
* Returns the total number of submitted reduce tasks
*/
static int getSubmittedReduceTasks() {
return numReducesSubmitted;
}
}
}
| 12,068 | 28.726601 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.gridmix.Statistics.ClusterStats;
import org.apache.hadoop.mapred.gridmix.Statistics.JobStats;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.rumen.JobStory;
import org.apache.hadoop.tools.rumen.JobStoryProducer;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
public class StressJobFactory extends JobFactory<Statistics.ClusterStats> {
public static final Log LOG = LogFactory.getLog(StressJobFactory.class);
private final LoadStatus loadStatus = new LoadStatus();
/**
* The minimum ratio between pending+running map tasks (aka. incomplete map
* tasks) and cluster map slot capacity for us to consider the cluster is
* overloaded. For running maps, we only count them partially. Namely, a 40%
* completed map is counted as 0.6 map tasks in our calculation.
*/
private static final float OVERLOAD_MAPTASK_MAPSLOT_RATIO = 2.0f;
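  // e.g. with 100 map slots and the default ratio of 2.0, the cluster is
  // treated as overloaded once the effective incomplete map task count
  // reaches 200.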
public static final String CONF_OVERLOAD_MAPTASK_MAPSLOT_RATIO=
"gridmix.throttle.maps.task-to-slot-ratio";
final float overloadMapTaskMapSlotRatio;
/**
* The minimum ratio between pending+running reduce tasks (aka. incomplete
* reduce tasks) and cluster reduce slot capacity for us to consider the
* cluster is overloaded. For running reduces, we only count them partially.
* Namely, a 40% completed reduce is counted as 0.6 reduce tasks in our
* calculation.
*/
private static final float OVERLOAD_REDUCETASK_REDUCESLOT_RATIO = 2.5f;
public static final String CONF_OVERLOAD_REDUCETASK_REDUCESLOT_RATIO=
"gridmix.throttle.reduces.task-to-slot-ratio";
final float overloadReduceTaskReduceSlotRatio;
/**
* The maximum share of the cluster's mapslot capacity that can be counted
* toward a job's incomplete map tasks in overload calculation.
*/
private static final float MAX_MAPSLOT_SHARE_PER_JOB=0.1f;
public static final String CONF_MAX_MAPSLOT_SHARE_PER_JOB=
"gridmix.throttle.maps.max-slot-share-per-job";
final float maxMapSlotSharePerJob;
/**
* The maximum share of the cluster's reduceslot capacity that can be counted
* toward a job's incomplete reduce tasks in overload calculation.
*/
private static final float MAX_REDUCESLOT_SHARE_PER_JOB=0.1f;
public static final String CONF_MAX_REDUCESLOT_SHARE_PER_JOB=
"gridmix.throttle.reducess.max-slot-share-per-job";
final float maxReduceSlotSharePerJob;
/**
* The ratio of the maximum number of pending+running jobs over the number of
* task trackers.
*/
private static final float MAX_JOB_TRACKER_RATIO=1.0f;
public static final String CONF_MAX_JOB_TRACKER_RATIO=
"gridmix.throttle.jobs-to-tracker-ratio";
final float maxJobTrackerRatio;
/**
* Represents a list of blacklisted jobs. Jobs are blacklisted when either
* they are complete or their status cannot be obtained. Stress mode will
* ignore blacklisted jobs from its overload computation.
*/
private Set<JobID> blacklistedJobs = new HashSet<JobID>();
/**
* Creating a new instance does not start the thread.
*
* @param submitter Component to which deserialized jobs are passed
* @param jobProducer Stream of job traces with which to construct a
* {@link org.apache.hadoop.tools.rumen.ZombieJobProducer}
* @param scratch Directory into which to write output from simulated jobs
* @param conf Config passed to all jobs to be submitted
* @param startFlag Latch released from main to start pipeline
* @throws java.io.IOException
*/
public StressJobFactory(
JobSubmitter submitter, JobStoryProducer jobProducer, Path scratch,
Configuration conf, CountDownLatch startFlag, UserResolver resolver)
throws IOException {
super(
submitter, jobProducer, scratch, conf, startFlag, resolver);
overloadMapTaskMapSlotRatio = conf.getFloat(
CONF_OVERLOAD_MAPTASK_MAPSLOT_RATIO, OVERLOAD_MAPTASK_MAPSLOT_RATIO);
overloadReduceTaskReduceSlotRatio = conf.getFloat(
CONF_OVERLOAD_REDUCETASK_REDUCESLOT_RATIO,
OVERLOAD_REDUCETASK_REDUCESLOT_RATIO);
maxMapSlotSharePerJob = conf.getFloat(
CONF_MAX_MAPSLOT_SHARE_PER_JOB, MAX_MAPSLOT_SHARE_PER_JOB);
maxReduceSlotSharePerJob = conf.getFloat(
CONF_MAX_REDUCESLOT_SHARE_PER_JOB, MAX_REDUCESLOT_SHARE_PER_JOB);
maxJobTrackerRatio = conf.getFloat(
CONF_MAX_JOB_TRACKER_RATIO, MAX_JOB_TRACKER_RATIO);
}
public Thread createReaderThread() {
return new StressReaderThread("StressJobFactory");
}
/*
* Worker thread responsible for reading descriptions, assigning sequence
* numbers, and normalizing time.
*/
private class StressReaderThread extends Thread {
public StressReaderThread(String name) {
super(name);
}
    /**
     * STRESS: Submits jobs in STRESS mode.
     * While the JobTracker is overloaded, wait and re-check the load.
     * Once it is no longer overloaded, determine the number of available
     * slots and keep submitting jobs until the submitted load is sufficient
     * to load the JobTracker, i.e. until the sum of incomplete map tasks
     * across jobs exceeds the configured overload ratio (2x by default)
     * times the number of available map slots.
     */
public void run() {
try {
startFlag.await();
if (Thread.currentThread().isInterrupted()) {
LOG.warn("[STRESS] Interrupted before start!. Exiting..");
return;
}
LOG.info("START STRESS @ " + System.currentTimeMillis());
while (!Thread.currentThread().isInterrupted()) {
try {
while (loadStatus.overloaded()) {
// update the overload status
if (LOG.isDebugEnabled()) {
LOG.debug("Updating the overload status.");
}
try {
checkLoadAndGetSlotsToBackfill();
} catch (IOException ioe) {
LOG.warn("[STRESS] Check failed!", ioe);
return;
}
// if the cluster is still overloaded, then sleep
if (loadStatus.overloaded()) {
if (LOG.isDebugEnabled()) {
LOG.debug("[STRESS] Cluster overloaded in run! Sleeping...");
}
// sleep
try {
Thread.sleep(1000);
} catch (InterruptedException ie) {
LOG.warn("[STRESS] Interrupted while sleeping! Exiting.", ie);
return;
}
}
}
while (!loadStatus.overloaded()) {
if (LOG.isDebugEnabled()) {
LOG.debug("[STRESS] Cluster underloaded in run! Stressing...");
}
try {
//TODO This in-line read can block submission for large jobs.
final JobStory job = getNextJobFiltered();
if (null == job) {
LOG.warn("[STRESS] Finished consuming the input trace. "
+ "Exiting..");
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Job Selected: " + job.getJobID());
}
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser(job.getUser());
UserGroupInformation tgtUgi = userResolver.getTargetUgi(ugi);
GridmixJob tJob =
jobCreator.createGridmixJob(conf, 0L, job, scratch,
tgtUgi, sequence.getAndIncrement());
// submit the job
submitter.add(tJob);
                // TODO: We need to take care of the scenario where a single
                // map/reduce task takes more than one slot.
                // Update the load status to account for the job just submitted
                // (the LoadStatus mutators below are synchronized).
int incompleteMapTasks = (int) calcEffectiveIncompleteMapTasks(
loadStatus.getMapCapacity(),
job.getNumberMaps(), 0.0f);
loadStatus.decrementMapLoad(incompleteMapTasks);
int incompleteReduceTasks =
(int) calcEffectiveIncompleteReduceTasks(
loadStatus.getReduceCapacity(),
job.getNumberReduces(), 0.0f);
loadStatus.decrementReduceLoad(incompleteReduceTasks);
loadStatus.decrementJobLoad(1);
} catch (IOException e) {
LOG.error("[STRESS] Error while submitting the job ", e);
error = e;
return;
}
}
} finally {
// do nothing
}
}
} catch (InterruptedException e) {
LOG.error("[STRESS] Interrupted in the main block!", e);
return;
} finally {
IOUtils.cleanup(null, jobProducer);
}
}
}
/**
   * STRESS mode: on each notification from the statistics collector, read the
   * latest cluster metrics and update the current loadStatus with the new load
   * of the JobTracker.
   *
   * @param item the latest cluster statistics
*/
@Override
public void update(Statistics.ClusterStats item) {
ClusterStatus clusterStatus = item.getStatus();
try {
// update the max cluster map/reduce task capacity
loadStatus.updateMapCapacity(clusterStatus.getMaxMapTasks());
loadStatus.updateReduceCapacity(clusterStatus.getMaxReduceTasks());
int numTrackers = clusterStatus.getTaskTrackers();
int jobLoad =
(int) (maxJobTrackerRatio * numTrackers) - item.getNumRunningJob();
loadStatus.updateJobLoad(jobLoad);
} catch (Exception e) {
LOG.error("Couldn't get the new Status",e);
}
}
float calcEffectiveIncompleteMapTasks(int mapSlotCapacity,
int numMaps, float mapProgress) {
float maxEffIncompleteMapTasks = Math.max(1.0f, mapSlotCapacity
* maxMapSlotSharePerJob);
float mapProgressAdjusted = Math.max(Math.min(mapProgress, 1.0f), 0.0f);
return Math.min(maxEffIncompleteMapTasks,
numMaps * (1.0f - mapProgressAdjusted));
}
float calcEffectiveIncompleteReduceTasks(int reduceSlotCapacity,
int numReduces, float reduceProgress) {
float maxEffIncompleteReduceTasks = Math.max(1.0f, reduceSlotCapacity
* maxReduceSlotSharePerJob);
float reduceProgressAdjusted =
Math.max(Math.min(reduceProgress, 1.0f), 0.0f);
return Math.min(maxEffIncompleteReduceTasks,
numReduces * (1.0f - reduceProgressAdjusted));
}
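  // Worked example for the two methods above (illustrative, not part of the original
  // source). With mapSlotCapacity = 200 and the default maxMapSlotSharePerJob = 0.1,
  // a single job can contribute at most max(1.0, 200 * 0.1) = 20 effective incomplete
  // maps. A job with numMaps = 50 at mapProgress = 0.4 therefore contributes
  // min(20, 50 * (1 - 0.4)) = min(20, 30) = 20 map tasks to the overload calculation.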
/**
   * Uses a light-weight heuristic to determine the current cluster load and
   * how many more map/reduce tasks and jobs can be back-filled.
*
* @throws java.io.IOException
*/
protected void checkLoadAndGetSlotsToBackfill()
throws IOException, InterruptedException {
if (loadStatus.getJobLoad() <= 0) {
if (LOG.isDebugEnabled()) {
LOG.debug(System.currentTimeMillis() + " [JobLoad] Overloaded is "
+ Boolean.TRUE.toString() + " NumJobsBackfill is "
+ loadStatus.getJobLoad());
}
return; // stop calculation because we know it is overloaded.
}
int mapCapacity = loadStatus.getMapCapacity();
int reduceCapacity = loadStatus.getReduceCapacity();
// return if the cluster status is not set
if (mapCapacity < 0 || reduceCapacity < 0) {
// note that, by default, the overload status is true
      // a missing cluster status will result in job submission being blocked
return;
}
// Determine the max permissible map & reduce task load
int maxMapLoad = (int) (overloadMapTaskMapSlotRatio * mapCapacity);
int maxReduceLoad =
(int) (overloadReduceTaskReduceSlotRatio * reduceCapacity);
// compute the total number of map & reduce tasks submitted
int totalMapTasks = ClusterStats.getSubmittedMapTasks();
int totalReduceTasks = ClusterStats.getSubmittedReduceTasks();
if (LOG.isDebugEnabled()) {
LOG.debug("Total submitted map tasks: " + totalMapTasks);
LOG.debug("Total submitted reduce tasks: " + totalReduceTasks);
LOG.debug("Max map load: " + maxMapLoad);
LOG.debug("Max reduce load: " + maxReduceLoad);
}
// generate a pessimistic bound on the max running+pending map tasks
// this check is to avoid the heavy-duty actual map load calculation
int mapSlotsBackFill = (int) (maxMapLoad - totalMapTasks);
// generate a pessimistic bound on the max running+pending reduce tasks
// this check is to avoid the heavy-duty actual reduce load calculation
int reduceSlotsBackFill = (int) (maxReduceLoad - totalReduceTasks);
// maintain a list of seen job ids
Set<JobID> seenJobIDs = new HashSet<JobID>();
// check if the total number of submitted map/reduce tasks exceeds the
// permissible limit
if (totalMapTasks > maxMapLoad || totalReduceTasks > maxReduceLoad) {
// if yes, calculate the real load
float incompleteMapTasks = 0; // include pending & running map tasks.
float incompleteReduceTasks = 0; // include pending & running reduce tasks
for (JobStats job : ClusterStats.getRunningJobStats()) {
JobID id = job.getJob().getJobID();
seenJobIDs.add(id);
// Note that this is a hack! Ideally, ClusterStats.getRunningJobStats()
// should be smart enough to take care of completed jobs.
if (blacklistedJobs.contains(id)) {
LOG.warn("Ignoring blacklisted job: " + id);
continue;
}
int noOfMaps = job.getNoOfMaps();
int noOfReduces = job.getNoOfReds();
        // Only consider jobs that have at least one map or reduce task.
        // TODO: What about setup/cleanup tasks for jobs where both the map and
        // reduce counts are 0?
if (noOfMaps > 0 || noOfReduces > 0) {
// get the job's status
JobStatus status = job.getJobStatus();
// blacklist completed jobs and continue
if (status != null && status.isJobComplete()) {
LOG.warn("Blacklisting completed job: " + id);
blacklistedJobs.add(id);
continue;
}
// get the map and reduce tasks' progress
float mapProgress = 0f;
float reduceProgress = 0f;
// check if the status is missing (this can happen for unpolled jobs)
if (status != null) {
mapProgress = status.getMapProgress();
reduceProgress = status.getReduceProgress();
}
incompleteMapTasks +=
calcEffectiveIncompleteMapTasks(mapCapacity, noOfMaps, mapProgress);
// bail out early
int currentMapSlotsBackFill = (int) (maxMapLoad - incompleteMapTasks);
if (currentMapSlotsBackFill <= 0) {
// reset the reduce task load since we are bailing out
incompleteReduceTasks = totalReduceTasks;
if (LOG.isDebugEnabled()) {
LOG.debug("Terminating overload check due to high map load.");
}
break;
}
// compute the real reduce load
if (noOfReduces > 0) {
incompleteReduceTasks +=
calcEffectiveIncompleteReduceTasks(reduceCapacity, noOfReduces,
reduceProgress);
}
// bail out early
int currentReduceSlotsBackFill =
(int) (maxReduceLoad - incompleteReduceTasks);
if (currentReduceSlotsBackFill <= 0) {
// reset the map task load since we are bailing out
incompleteMapTasks = totalMapTasks;
if (LOG.isDebugEnabled()) {
LOG.debug("Terminating overload check due to high reduce load.");
}
break;
}
} else {
LOG.warn("Blacklisting empty job: " + id);
blacklistedJobs.add(id);
}
}
// calculate the real map load on the cluster
mapSlotsBackFill = (int) (maxMapLoad - incompleteMapTasks);
// calculate the real reduce load on the cluster
reduceSlotsBackFill = (int)(maxReduceLoad - incompleteReduceTasks);
      // clean up the blacklisted set to keep the memory footprint minimal
// retain only the jobs that are seen in this cycle
blacklistedJobs.retainAll(seenJobIDs);
if (LOG.isDebugEnabled() && blacklistedJobs.size() > 0) {
LOG.debug("Blacklisted jobs count: " + blacklistedJobs.size());
}
}
// update
loadStatus.updateMapLoad(mapSlotsBackFill);
loadStatus.updateReduceLoad(reduceSlotsBackFill);
if (loadStatus.getMapLoad() <= 0) {
if (LOG.isDebugEnabled()) {
LOG.debug(System.currentTimeMillis() + " [MAP-LOAD] Overloaded is "
+ Boolean.TRUE.toString() + " MapSlotsBackfill is "
+ loadStatus.getMapLoad());
}
return; // stop calculation because we know it is overloaded.
}
if (loadStatus.getReduceLoad() <= 0) {
if (LOG.isDebugEnabled()) {
LOG.debug(System.currentTimeMillis() + " [REDUCE-LOAD] Overloaded is "
+ Boolean.TRUE.toString() + " ReduceSlotsBackfill is "
+ loadStatus.getReduceLoad());
}
return; // stop calculation because we know it is overloaded.
}
if (LOG.isDebugEnabled()) {
LOG.debug(System.currentTimeMillis() + " [OVERALL] Overloaded is "
          + Boolean.FALSE.toString() + ". Current load status is "
+ loadStatus);
}
}
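  // Illustrative walk-through of the check above (not part of the original source).
  // With a 300-slot map capacity and the default 2.0 ratio, maxMapLoad = 600. If only
  // 450 map tasks have been submitted so far, the cheap bound already yields
  // mapSlotsBackFill = 150 and the per-job scan is skipped. Only once the submitted
  // total exceeds 600 does the loop above compute the real (progress-weighted) load.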
static class LoadStatus {
/**
* Additional number of map slots that can be requested before
* declaring (by Gridmix STRESS mode) the cluster as overloaded.
*/
private volatile int mapSlotsBackfill;
/**
* Determines the total map slot capacity of the cluster.
*/
private volatile int mapSlotCapacity;
/**
* Additional number of reduce slots that can be requested before
* declaring (by Gridmix STRESS mode) the cluster as overloaded.
*/
private volatile int reduceSlotsBackfill;
/**
* Determines the total reduce slot capacity of the cluster.
*/
private volatile int reduceSlotCapacity;
/**
* Determines the max count of running jobs in the cluster.
*/
private volatile int numJobsBackfill;
// set the default to true
private AtomicBoolean overloaded = new AtomicBoolean(true);
/**
     * Construct the LoadStatus in an unknown state, assuming the cluster is
     * overloaded, by setting all the backfill counters to 0.
*/
LoadStatus() {
mapSlotsBackfill = 0;
reduceSlotsBackfill = 0;
numJobsBackfill = 0;
mapSlotCapacity = -1;
reduceSlotCapacity = -1;
}
public synchronized int getMapLoad() {
return mapSlotsBackfill;
}
public synchronized int getMapCapacity() {
return mapSlotCapacity;
}
public synchronized int getReduceLoad() {
return reduceSlotsBackfill;
}
public synchronized int getReduceCapacity() {
return reduceSlotCapacity;
}
public synchronized int getJobLoad() {
return numJobsBackfill;
}
public synchronized void decrementMapLoad(int mapSlotsConsumed) {
this.mapSlotsBackfill -= mapSlotsConsumed;
updateOverloadStatus();
}
public synchronized void decrementReduceLoad(int reduceSlotsConsumed) {
this.reduceSlotsBackfill -= reduceSlotsConsumed;
updateOverloadStatus();
}
public synchronized void decrementJobLoad(int numJobsConsumed) {
this.numJobsBackfill -= numJobsConsumed;
updateOverloadStatus();
}
public synchronized void updateMapCapacity(int mapSlotsCapacity) {
this.mapSlotCapacity = mapSlotsCapacity;
updateOverloadStatus();
}
public synchronized void updateReduceCapacity(int reduceSlotsCapacity) {
this.reduceSlotCapacity = reduceSlotsCapacity;
updateOverloadStatus();
}
public synchronized void updateMapLoad(int mapSlotsBackfill) {
this.mapSlotsBackfill = mapSlotsBackfill;
updateOverloadStatus();
}
public synchronized void updateReduceLoad(int reduceSlotsBackfill) {
this.reduceSlotsBackfill = reduceSlotsBackfill;
updateOverloadStatus();
}
public synchronized void updateJobLoad(int numJobsBackfill) {
this.numJobsBackfill = numJobsBackfill;
updateOverloadStatus();
}
private synchronized void updateOverloadStatus() {
overloaded.set((mapSlotsBackfill <= 0) || (reduceSlotsBackfill <= 0)
|| (numJobsBackfill <= 0));
}
public boolean overloaded() {
return overloaded.get();
}
public synchronized String toString() {
// TODO Use StringBuilder instead
return " Overloaded = " + overloaded()
+ ", MapSlotBackfill = " + mapSlotsBackfill
+ ", MapSlotCapacity = " + mapSlotCapacity
+ ", ReduceSlotBackfill = " + reduceSlotsBackfill
+ ", ReduceSlotCapacity = " + reduceSlotCapacity
+ ", NumJobsBackfill = " + numJobsBackfill;
}
}
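  // Minimal sketch (added for illustration only, not part of the original source):
  // demonstrates how the LoadStatus bookkeeping above drives the overload flag.
  // All the numbers used here are made up.
  static boolean illustrateLoadStatus() {
    LoadStatus status = new LoadStatus();       // starts out overloaded by construction
    status.updateMapCapacity(100);              // cluster has 100 map slots
    status.updateReduceCapacity(50);            // and 50 reduce slots
    status.updateMapLoad(40);                   // 40 more map tasks can be absorbed
    status.updateReduceLoad(10);                // 10 more reduce tasks can be absorbed
    status.updateJobLoad(5);                    // 5 more jobs can be admitted
    boolean beforeSubmission = status.overloaded(); // false: every backfill is positive
    status.decrementJobLoad(5);                 // submitting 5 jobs exhausts the budget
    boolean afterSubmission = status.overloaded();  // true: numJobsBackfill hit 0
    return !beforeSubmission && afterSubmission;
  }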
/**
* Start the reader thread, wait for latch if necessary.
*/
@Override
public void start() {
LOG.info(" Starting Stress submission ");
this.rThread.start();
}
}
| 22,827 | 36.361702 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/AvgRecordFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
/**
* Given byte and record targets, emit roughly equal-sized records satisfying
* the contract.
*/
class AvgRecordFactory extends RecordFactory {
/**
   * Fraction of each record used for key data.
*/
public static final String GRIDMIX_KEY_FRC = "gridmix.key.fraction";
public static final String GRIDMIX_MISSING_REC_SIZE =
"gridmix.missing.rec.size";
private final long targetBytes;
private final long targetRecords;
private final long step;
private final int avgrec;
private final int keyLen;
private long accBytes = 0L;
private long accRecords = 0L;
private int unspilledBytes = 0;
private int minSpilledBytes = 0;
/**
* @param targetBytes Expected byte count.
* @param targetRecords Expected record count.
* @param conf Used to resolve edge cases @see #GRIDMIX_KEY_FRC
*/
public AvgRecordFactory(long targetBytes, long targetRecords,
Configuration conf) {
this(targetBytes, targetRecords, conf, 0);
}
/**
* @param minSpilledBytes Minimum amount of data expected per record
*/
public AvgRecordFactory(long targetBytes, long targetRecords,
Configuration conf, int minSpilledBytes) {
this.targetBytes = targetBytes;
this.targetRecords = targetRecords <= 0 && this.targetBytes >= 0
? Math.max(1,
this.targetBytes / conf.getInt(GRIDMIX_MISSING_REC_SIZE, 64 * 1024))
: targetRecords;
final long tmp = this.targetBytes / this.targetRecords;
step = this.targetBytes - this.targetRecords * tmp;
avgrec = (int) Math.min(Integer.MAX_VALUE, tmp + 1);
keyLen = Math.max(1,
(int)(tmp * Math.min(1.0f, conf.getFloat(GRIDMIX_KEY_FRC, 0.1f))));
this.minSpilledBytes = minSpilledBytes;
}
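  // Worked example of the sizing math above (illustrative, not part of the original
  // source). For targetBytes = 1000 and targetRecords = 3: tmp = 333, step = 1,
  // avgrec = 334 and, with the default key fraction of 0.1, keyLen = 33. The factory
  // then emits one record of 334 bytes followed by two records of 333 bytes,
  // totalling exactly 1000 bytes.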
@Override
public boolean next(GridmixKey key, GridmixRecord val) throws IOException {
if (accBytes >= targetBytes) {
return false;
}
final int reclen = accRecords++ >= step ? avgrec - 1 : avgrec;
final int len = (int) Math.min(targetBytes - accBytes, reclen);
unspilledBytes += len;
// len != reclen?
if (key != null) {
if (unspilledBytes < minSpilledBytes && accRecords < targetRecords) {
key.setSize(1);
val.setSize(1);
accBytes += key.getSize() + val.getSize();
unspilledBytes -= (key.getSize() + val.getSize());
} else {
key.setSize(keyLen);
val.setSize(unspilledBytes - key.getSize());
accBytes += unspilledBytes;
unspilledBytes = 0;
}
} else {
if (unspilledBytes < minSpilledBytes && accRecords < targetRecords) {
val.setSize(1);
accBytes += val.getSize();
unspilledBytes -= val.getSize();
} else {
val.setSize(unspilledBytes);
accBytes += unspilledBytes;
unspilledBytes = 0;
}
}
return true;
}
@Override
public float getProgress() throws IOException {
return Math.min(1.0f, accBytes / ((float)targetBytes));
}
@Override
public void close() throws IOException {
// noop
}
}
| 3,952 | 30.879032 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/DistributedCacheEmulator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.DeprecationDelta;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.tools.rumen.JobStory;
import org.apache.hadoop.tools.rumen.JobStoryProducer;
import org.apache.hadoop.tools.rumen.Pre21JobHistoryConstants;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Emulation of Distributed Cache Usage in gridmix.
* <br> Emulation of Distributed Cache Load in gridmix will put load on
* TaskTrackers and affects execution time of tasks because of localization of
* distributed cache files by TaskTrackers.
* <br> Gridmix creates distributed cache files for simulated jobs by launching
* a MapReduce job {@link GenerateDistCacheData} in advance i.e. before
* launching simulated jobs.
* <br> The distributed cache file paths used in the original cluster are mapped
* to unique file names in the simulated cluster.
* <br> All HDFS-based distributed cache files generated by gridmix are
* public distributed cache files. But Gridmix makes sure that load incurred due
* to localization of private distributed cache files on the original cluster
* is also faithfully simulated. Gridmix emulates the load due to private
* distributed cache files by mapping private distributed cache files of
* different users in the original cluster to different public distributed cache
* files in the simulated cluster.
*
* <br> The configuration properties like
* {@link MRJobConfig#CACHE_FILES}, {@link MRJobConfig#CACHE_FILE_VISIBILITIES},
* {@link MRJobConfig#CACHE_FILES_SIZES} and
* {@link MRJobConfig#CACHE_FILE_TIMESTAMPS} obtained from trace are used to
* decide
* <li> file size of each distributed cache file to be generated
* <li> whether a distributed cache file is already seen in this trace file
* <li> whether a distributed cache file was considered public or private.
* <br>
* <br> Gridmix configures these generated files as distributed cache files for
* the simulated jobs.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
class DistributedCacheEmulator {
private static final Log LOG =
LogFactory.getLog(DistributedCacheEmulator.class);
static final long AVG_BYTES_PER_MAP = 128 * 1024 * 1024L;// 128MB
private Path distCachePath;
/**
* Map between simulated cluster's distributed cache file paths and their
* file sizes. Unique distributed cache files are entered into this map.
   * Two distributed cache files are considered the same if and only if their
   * file paths, visibilities and timestamps are the same.
*/
private Map<String, Long> distCacheFiles = new HashMap<String, Long>();
/**
* Configuration property for whether gridmix should emulate
* distributed cache usage or not. Default value is true.
*/
static final String GRIDMIX_EMULATE_DISTRIBUTEDCACHE =
"gridmix.distributed-cache-emulation.enable";
// Whether to emulate distributed cache usage or not
boolean emulateDistributedCache = true;
// Whether to generate distributed cache data or not
boolean generateDistCacheData = false;
Configuration conf; // gridmix configuration
private static final Charset charsetUTF8 = Charset.forName("UTF-8");
// Pseudo local file system where local FS based distributed cache files are
// created by gridmix.
FileSystem pseudoLocalFs = null;
{
// Need to handle deprecation of these MapReduce-internal configuration
// properties as MapReduce doesn't handle their deprecation.
Configuration.addDeprecations(new DeprecationDelta[] {
new DeprecationDelta("mapred.cache.files.filesizes",
MRJobConfig.CACHE_FILES_SIZES),
new DeprecationDelta("mapred.cache.files.visibilities",
MRJobConfig.CACHE_FILE_VISIBILITIES)
});
}
/**
* @param conf gridmix configuration
* @param ioPath <ioPath>/distributedCache/ is the gridmix Distributed
* Cache directory
*/
public DistributedCacheEmulator(Configuration conf, Path ioPath) {
this.conf = conf;
distCachePath = new Path(ioPath, "distributedCache");
this.conf.setClass("fs.pseudo.impl", PseudoLocalFs.class, FileSystem.class);
}
/**
* This is to be called before any other method of DistributedCacheEmulator.
* <br> Checks if emulation of distributed cache load is needed and is feasible.
* Sets the flags generateDistCacheData and emulateDistributedCache to the
* appropriate values.
* <br> Gridmix does not emulate distributed cache load if
* <ol><li> the specific gridmix job type doesn't need emulation of
* distributed cache load OR
* <li> the trace is coming from a stream instead of file OR
* <li> the distributed cache dir where distributed cache data is to be
* generated by gridmix is on local file system OR
 * <li> execute permission is missing on any of the ancestor directories
 *      of <ioPath>, up to the root. This is because, for emulation of
 *      distributed cache load, the distributed cache files created under
 *      <ioPath/distributedCache/> should be considered by hadoop
 *      as public distributed cache files.
* <li> creation of pseudo local file system fails.</ol>
* <br> For (2), (3), (4) and (5), generation of distributed cache data
* is also disabled.
*
* @param traceIn trace file path. If this is '-', then trace comes from the
* stream stdin.
* @param jobCreator job creator of gridmix jobs of a specific type
* @param generate true if -generate option was specified
* @throws IOException
*/
void init(String traceIn, JobCreator jobCreator, boolean generate)
throws IOException {
emulateDistributedCache = jobCreator.canEmulateDistCacheLoad()
&& conf.getBoolean(GRIDMIX_EMULATE_DISTRIBUTEDCACHE, true);
generateDistCacheData = generate;
if (generateDistCacheData || emulateDistributedCache) {
if ("-".equals(traceIn)) {// trace is from stdin
LOG.warn("Gridmix will not emulate Distributed Cache load because "
+ "the input trace source is a stream instead of file.");
emulateDistributedCache = generateDistCacheData = false;
} else if (FileSystem.getLocal(conf).getUri().getScheme().equals(
distCachePath.toUri().getScheme())) {// local FS
LOG.warn("Gridmix will not emulate Distributed Cache load because "
+ "<iopath> provided is on local file system.");
emulateDistributedCache = generateDistCacheData = false;
} else {
        // Check that execute permission is present on all the ancestor
        // directories of distCachePath, up to the root.
FileSystem fs = FileSystem.get(conf);
Path cur = distCachePath.getParent();
while (cur != null) {
if (cur.toString().length() > 0) {
FsPermission perm = fs.getFileStatus(cur).getPermission();
if (!perm.getOtherAction().and(FsAction.EXECUTE).equals(
FsAction.EXECUTE)) {
LOG.warn("Gridmix will not emulate Distributed Cache load "
+ "because the ascendant directory (of distributed cache "
+ "directory) " + cur + " doesn't have execute permission "
+ "for others.");
emulateDistributedCache = generateDistCacheData = false;
break;
}
}
cur = cur.getParent();
}
}
}
// Check if pseudo local file system can be created
try {
pseudoLocalFs = FileSystem.get(new URI("pseudo:///"), conf);
} catch (URISyntaxException e) {
LOG.warn("Gridmix will not emulate Distributed Cache load because "
+ "creation of pseudo local file system failed.");
e.printStackTrace();
emulateDistributedCache = generateDistCacheData = false;
return;
}
}
/**
* @return true if gridmix should emulate distributed cache load
*/
boolean shouldEmulateDistCacheLoad() {
return emulateDistributedCache;
}
/**
* @return true if gridmix should generate distributed cache data
*/
boolean shouldGenerateDistCacheData() {
return generateDistCacheData;
}
/**
* @return the distributed cache directory path
*/
Path getDistributedCacheDir() {
return distCachePath;
}
/**
* Create distributed cache directories.
* Also create a file that contains the list of distributed cache files
* that will be used as distributed cache files for all the simulated jobs.
* @param jsp job story producer for the trace
* @return exit code
* @throws IOException
*/
int setupGenerateDistCacheData(JobStoryProducer jsp)
throws IOException {
createDistCacheDirectory();
return buildDistCacheFilesList(jsp);
}
/**
* Create distributed cache directory where distributed cache files will be
* created by the MapReduce job {@link GenerateDistCacheData#JOB_NAME}.
* @throws IOException
*/
private void createDistCacheDirectory() throws IOException {
FileSystem fs = FileSystem.get(conf);
FileSystem.mkdirs(fs, distCachePath, new FsPermission((short) 0777));
}
/**
* Create the list of unique distributed cache files needed for all the
* simulated jobs and write the list to a special file.
* @param jsp job story producer for the trace
* @return exit code
* @throws IOException
*/
private int buildDistCacheFilesList(JobStoryProducer jsp) throws IOException {
// Read all the jobs from the trace file and build the list of unique
// distributed cache files.
JobStory jobStory;
while ((jobStory = jsp.getNextJob()) != null) {
if (jobStory.getOutcome() == Pre21JobHistoryConstants.Values.SUCCESS &&
jobStory.getSubmissionTime() >= 0) {
updateHDFSDistCacheFilesList(jobStory);
}
}
jsp.close();
return writeDistCacheFilesList();
}
/**
   * For the job to be simulated, identify the needed distributed cache files by
   * mapping the original cluster's distributed cache file paths to the simulated
   * cluster's paths and adding these paths to the map {@code distCacheFiles}.
*<br>
* JobStory should contain distributed cache related properties like
* <li> {@link MRJobConfig#CACHE_FILES}
* <li> {@link MRJobConfig#CACHE_FILE_VISIBILITIES}
* <li> {@link MRJobConfig#CACHE_FILES_SIZES}
* <li> {@link MRJobConfig#CACHE_FILE_TIMESTAMPS}
* <li> {@link MRJobConfig#CLASSPATH_FILES}
*
* <li> {@link MRJobConfig#CACHE_ARCHIVES}
* <li> {@link MRJobConfig#CACHE_ARCHIVES_VISIBILITIES}
* <li> {@link MRJobConfig#CACHE_ARCHIVES_SIZES}
* <li> {@link MRJobConfig#CACHE_ARCHIVES_TIMESTAMPS}
* <li> {@link MRJobConfig#CLASSPATH_ARCHIVES}
*
* <li> {@link MRJobConfig#CACHE_SYMLINK}
*
* @param jobdesc JobStory of original job obtained from trace
* @throws IOException
*/
void updateHDFSDistCacheFilesList(JobStory jobdesc) throws IOException {
// Map original job's distributed cache file paths to simulated cluster's
// paths, to be used by this simulated job.
JobConf jobConf = jobdesc.getJobConf();
String[] files = jobConf.getStrings(MRJobConfig.CACHE_FILES);
if (files != null) {
String[] fileSizes = jobConf.getStrings(MRJobConfig.CACHE_FILES_SIZES);
String[] visibilities =
jobConf.getStrings(MRJobConfig.CACHE_FILE_VISIBILITIES);
String[] timeStamps =
jobConf.getStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS);
FileSystem fs = FileSystem.get(conf);
String user = jobConf.getUser();
for (int i = 0; i < files.length; i++) {
// Check if visibilities are available because older hadoop versions
// didn't have public, private Distributed Caches separately.
boolean visibility =
(visibilities == null) ? true : Boolean.valueOf(visibilities[i]);
if (isLocalDistCacheFile(files[i], user, visibility)) {
// local FS based distributed cache file.
// Create this file on the pseudo local FS on the fly (i.e. when the
// simulated job is submitted).
continue;
}
// distributed cache file on hdfs
String mappedPath = mapDistCacheFilePath(files[i], timeStamps[i],
visibility, user);
// No need to add a distributed cache file path to the list if
// (1) the mapped path is already there in the list OR
// (2) the file with the mapped path already exists.
// In any of the above 2 cases, file paths, timestamps, file sizes and
// visibilities match. File sizes should match if file paths and
// timestamps match because single file path with single timestamp
// should correspond to a single file size.
if (distCacheFiles.containsKey(mappedPath) ||
fs.exists(new Path(mappedPath))) {
continue;
}
distCacheFiles.put(mappedPath, Long.valueOf(fileSizes[i]));
}
}
}
/**
* Check if the file path provided was constructed by MapReduce for a
* distributed cache file on local file system.
* @param filePath path of the distributed cache file
* @param user job submitter of the job for which <filePath> is a
* distributed cache file
* @param visibility <code>true</code> for public distributed cache file
* @return true if the path provided is of a local file system based
* distributed cache file
*/
static boolean isLocalDistCacheFile(String filePath, String user,
boolean visibility) {
return (!visibility && filePath.contains(user + "/.staging"));
}
/**
* Map the HDFS based distributed cache file path from original cluster to
* a unique file name on the simulated cluster.
* <br> Unique distributed file names on simulated cluster are generated
* using original cluster's <li>file path, <li>timestamp and <li> the
* job-submitter for private distributed cache file.
   * <br> This implies that if, on the original cluster, a single HDFS file is
   * considered as two private distributed cache files for two jobs of
   * different users, then the corresponding simulated jobs will have two
   * different files of the same size in the public distributed cache, one for
   * each user. These two simulated jobs will not share the distributed cache
   * files, thus leading to the same load as seen in the original cluster.
   * @param file distributed cache file path
   * @param timeStamp time stamp of the distributed cache file
* @param isPublic true if this distributed cache file is a public
* distributed cache file
* @param user job submitter on original cluster
* @return the mapped path on simulated cluster
*/
private String mapDistCacheFilePath(String file, String timeStamp,
boolean isPublic, String user) {
String id = file + timeStamp;
if (!isPublic) {
// consider job-submitter for private distributed cache file
id = id.concat(user);
}
return new Path(distCachePath, MD5Hash.digest(id).toString()).toUri()
.getPath();
}
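  // Illustrative example (not part of the original source); the file path and user
  // names are hypothetical. For a private cache file "/user/alice/lib/dict.txt" with
  // timestamp "1234" submitted by "alice", the id is
  // "/user/alice/lib/dict.txt1234alice" and the mapped path is
  // <distCachePath>/<MD5 of that id>. The same HDFS file used privately by "bob"
  // hashes to a different name, so the two simulated jobs localize separate files,
  // reproducing the per-user localization cost of the original cluster.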
/**
* Write the list of distributed cache files in the decreasing order of
* file sizes into the sequence file. This file will be input to the job
* {@link GenerateDistCacheData}.
* Also validates if -generate option is missing and distributed cache files
* are missing.
* @return exit code
* @throws IOException
*/
private int writeDistCacheFilesList()
throws IOException {
// Sort the distributed cache files in the decreasing order of file sizes.
    List<Map.Entry<String, Long>> dcFiles =
        new ArrayList<Map.Entry<String, Long>>(distCacheFiles.entrySet());
    Collections.sort(dcFiles, new Comparator<Map.Entry<String, Long>>() {
      public int compare(Map.Entry<String, Long> dc1,
                         Map.Entry<String, Long> dc2) {
        return dc2.getValue().compareTo(dc1.getValue());
      }
    });
// write the sorted distributed cache files to the sequence file
FileSystem fs = FileSystem.get(conf);
Path distCacheFilesList = new Path(distCachePath, "_distCacheFiles.txt");
conf.set(GenerateDistCacheData.GRIDMIX_DISTCACHE_FILE_LIST,
distCacheFilesList.toString());
SequenceFile.Writer src_writer = SequenceFile.createWriter(fs, conf,
distCacheFilesList, LongWritable.class, BytesWritable.class,
SequenceFile.CompressionType.NONE);
// Total number of unique distributed cache files
int fileCount = dcFiles.size();
long byteCount = 0;// Total size of all distributed cache files
long bytesSync = 0;// Bytes after previous sync;used to add sync marker
    for (Map.Entry<String, Long> entry : dcFiles) {
      LongWritable fileSize = new LongWritable(entry.getValue().longValue());
      BytesWritable filePath =
          new BytesWritable(entry.getKey().getBytes(charsetUTF8));
byteCount += fileSize.get();
bytesSync += fileSize.get();
if (bytesSync > AVG_BYTES_PER_MAP) {
src_writer.sync();
bytesSync = fileSize.get();
}
src_writer.append(fileSize, filePath);
}
if (src_writer != null) {
src_writer.close();
}
// Set delete on exit for 'dist cache files list' as it is not needed later.
fs.deleteOnExit(distCacheFilesList);
conf.setInt(GenerateDistCacheData.GRIDMIX_DISTCACHE_FILE_COUNT, fileCount);
conf.setLong(GenerateDistCacheData.GRIDMIX_DISTCACHE_BYTE_COUNT, byteCount);
LOG.info("Number of HDFS based distributed cache files to be generated is "
+ fileCount + ". Total size of HDFS based distributed cache files "
+ "to be generated is " + byteCount);
if (!shouldGenerateDistCacheData() && fileCount > 0) {
LOG.error("Missing " + fileCount + " distributed cache files under the "
+ " directory\n" + distCachePath + "\nthat are needed for gridmix"
+ " to emulate distributed cache load. Either use -generate\noption"
+ " to generate distributed cache data along with input data OR "
+ "disable\ndistributed cache emulation by configuring '"
+ DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE
+ "' to false.");
return Gridmix.MISSING_DIST_CACHE_FILES_ERROR;
}
return 0;
}
/**
* If gridmix needs to emulate distributed cache load, then configure
* distributed cache files of a simulated job by mapping the original
* cluster's distributed cache file paths to the simulated cluster's paths and
* setting these mapped paths in the job configuration of the simulated job.
* <br>
* Configure local FS based distributed cache files through the property
* "tmpfiles" and hdfs based distributed cache files through the property
* {@link MRJobConfig#CACHE_FILES}.
* @param conf configuration for the simulated job to be run
* @param jobConf job configuration of original cluster's job, obtained from
* trace
* @throws IOException
*/
void configureDistCacheFiles(Configuration conf, JobConf jobConf)
throws IOException {
if (shouldEmulateDistCacheLoad()) {
String[] files = jobConf.getStrings(MRJobConfig.CACHE_FILES);
if (files != null) {
// hdfs based distributed cache files to be configured for simulated job
List<String> cacheFiles = new ArrayList<String>();
// local FS based distributed cache files to be configured for
// simulated job
List<String> localCacheFiles = new ArrayList<String>();
String[] visibilities =
jobConf.getStrings(MRJobConfig.CACHE_FILE_VISIBILITIES);
String[] timeStamps =
jobConf.getStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS);
String[] fileSizes = jobConf.getStrings(MRJobConfig.CACHE_FILES_SIZES);
String user = jobConf.getUser();
for (int i = 0; i < files.length; i++) {
// Check if visibilities are available because older hadoop versions
// didn't have public, private Distributed Caches separately.
boolean visibility =
(visibilities == null) ? true : Boolean.valueOf(visibilities[i]);
if (isLocalDistCacheFile(files[i], user, visibility)) {
// local FS based distributed cache file.
// Create this file on the pseudo local FS.
String fileId = MD5Hash.digest(files[i] + timeStamps[i]).toString();
long fileSize = Long.parseLong(fileSizes[i]);
Path mappedLocalFilePath =
PseudoLocalFs.generateFilePath(fileId, fileSize)
.makeQualified(pseudoLocalFs.getUri(),
pseudoLocalFs.getWorkingDirectory());
pseudoLocalFs.create(mappedLocalFilePath);
localCacheFiles.add(mappedLocalFilePath.toUri().toString());
} else {
// hdfs based distributed cache file.
// Get the mapped HDFS path on simulated cluster
String mappedPath = mapDistCacheFilePath(files[i], timeStamps[i],
visibility, user);
cacheFiles.add(mappedPath);
}
}
if (cacheFiles.size() > 0) {
// configure hdfs based distributed cache files for simulated job
conf.setStrings(MRJobConfig.CACHE_FILES,
cacheFiles.toArray(new String[cacheFiles.size()]));
}
if (localCacheFiles.size() > 0) {
// configure local FS based distributed cache files for simulated job
conf.setStrings("tmpfiles", localCacheFiles.toArray(
new String[localCacheFiles.size()]));
}
}
}
}
}
| 23,506 | 41.662432 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RoundRobinUserResolver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.LineReader;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
public class RoundRobinUserResolver implements UserResolver {
public static final Log LOG = LogFactory.getLog(RoundRobinUserResolver.class);
private int uidx = 0;
private List<UserGroupInformation> users = Collections.emptyList();
/**
* Mapping between user names of original cluster and UGIs of proxy users of
* simulated cluster
*/
private final HashMap<String,UserGroupInformation> usercache =
new HashMap<String,UserGroupInformation>();
/**
   * The user list assumes one user per line.
   * Each line in the users-list file is of the form <username>[,group]*
   * <br> Group names are ignored (they are not parsed at all).
*/
private List<UserGroupInformation> parseUserList(URI userUri,
Configuration conf)
throws IOException {
if (null == userUri) {
return Collections.emptyList();
}
final Path userloc = new Path(userUri.toString());
final Text rawUgi = new Text();
final FileSystem fs = userloc.getFileSystem(conf);
final ArrayList<UserGroupInformation> ugiList =
new ArrayList<UserGroupInformation>();
LineReader in = null;
try {
in = new LineReader(fs.open(userloc));
while (in.readLine(rawUgi) > 0) {//line is of the form username[,group]*
if(rawUgi.toString().trim().equals("")) {
continue; //Continue on empty line
}
// e is end position of user name in this line
int e = rawUgi.find(",");
if (e == 0) {
throw new IOException("Missing username: " + rawUgi);
}
if (e == -1) {
e = rawUgi.getLength();
}
final String username = Text.decode(rawUgi.getBytes(), 0, e).trim();
UserGroupInformation ugi = null;
try {
ugi = UserGroupInformation.createProxyUser(username,
UserGroupInformation.getLoginUser());
} catch (IOException ioe) {
LOG.error("Error while creating a proxy user " ,ioe);
}
if (ugi != null) {
ugiList.add(ugi);
}
// No need to parse groups, even if they exist. Go to next line
}
} finally {
if (in != null) {
in.close();
}
}
return ugiList;
}
@Override
public synchronized boolean setTargetUsers(URI userloc, Configuration conf)
throws IOException {
uidx = 0;
users = parseUserList(userloc, conf);
if (users.size() == 0) {
throw new IOException(buildEmptyUsersErrorMsg(userloc));
}
usercache.clear();
return true;
}
static String buildEmptyUsersErrorMsg(URI userloc) {
return "Empty user list is not allowed for RoundRobinUserResolver. Provided"
+ " user resource URI '" + userloc + "' resulted in an empty user list.";
}
@Override
public synchronized UserGroupInformation getTargetUgi(
UserGroupInformation ugi) {
// UGI of proxy user
UserGroupInformation targetUGI = usercache.get(ugi.getUserName());
if (targetUGI == null) {
targetUGI = users.get(uidx++ % users.size());
usercache.put(ugi.getUserName(), targetUGI);
}
return targetUGI;
}
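  // Illustrative example (not part of the original source): given a users-list file
  // containing the two (hypothetical) proxy users
  //   proxyuser1
  //   proxyuser2,groupA
  // trace users are mapped round-robin and then cached, e.g. traceUserA -> proxyuser1,
  // traceUserB -> proxyuser2, traceUserC -> proxyuser1, and every subsequent job from
  // traceUserA reuses proxyuser1.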
/**
* {@inheritDoc}
* <p>
* {@link RoundRobinUserResolver} needs to map the users in the
* trace to the provided list of target users. So user list is needed.
*/
public boolean needsTargetUsersList() {
return true;
}
}
| 4,751 | 32.702128 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/InputStriper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map.Entry;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
 * Given a {@link FilePool}, obtain a set of files capable of satisfying
* a full set of splits, then iterate over each source to fill the request.
*/
class InputStriper {
public static final Log LOG = LogFactory.getLog(InputStriper.class);
int idx;
long currentStart;
FileStatus current;
final List<FileStatus> files = new ArrayList<FileStatus>();
final Configuration conf = new Configuration();
/**
* @param inputDir Pool from which files are requested.
* @param mapBytes Sum of all expected split requests.
*/
InputStriper(FilePool inputDir, long mapBytes)
throws IOException {
final long inputBytes = inputDir.getInputFiles(mapBytes, files);
if (mapBytes > inputBytes) {
LOG.warn("Using " + inputBytes + "/" + mapBytes + " bytes");
}
if (files.isEmpty() && mapBytes > 0) {
throw new IOException("Failed to satisfy request for " + mapBytes);
}
current = files.isEmpty() ? null : files.get(0);
}
/**
* @param inputDir Pool used to resolve block locations.
* @param bytes Target byte count
* @param nLocs Number of block locations per split.
* @return A set of files satisfying the byte count, with locations weighted
* to the dominating proportion of input bytes.
*/
CombineFileSplit splitFor(FilePool inputDir, long bytes, int nLocs)
throws IOException {
final ArrayList<Path> paths = new ArrayList<Path>();
final ArrayList<Long> start = new ArrayList<Long>();
final ArrayList<Long> length = new ArrayList<Long>();
final HashMap<String,Double> sb = new HashMap<String,Double>();
do {
paths.add(current.getPath());
start.add(currentStart);
final long fromFile = Math.min(bytes, current.getLen() - currentStart);
length.add(fromFile);
for (BlockLocation loc :
inputDir.locationsFor(current, currentStart, fromFile)) {
final double tedium = loc.getLength() / (1.0 * bytes);
for (String l : loc.getHosts()) {
Double j = sb.get(l);
if (null == j) {
sb.put(l, tedium);
} else {
sb.put(l, j.doubleValue() + tedium);
}
}
}
currentStart += fromFile;
bytes -= fromFile;
// Switch to a new file if
// - the current file is uncompressed and completely used
// - the current file is compressed
CompressionCodecFactory compressionCodecs =
new CompressionCodecFactory(conf);
CompressionCodec codec = compressionCodecs.getCodec(current.getPath());
if (current.getLen() - currentStart == 0
|| codec != null) {
current = files.get(++idx % files.size());
currentStart = 0;
}
} while (bytes > 0);
final ArrayList<Entry<String,Double>> sort =
new ArrayList<Entry<String,Double>>(sb.entrySet());
Collections.sort(sort, hostRank);
final String[] hosts = new String[Math.min(nLocs, sort.size())];
for (int i = 0; i < nLocs && i < sort.size(); ++i) {
hosts[i] = sort.get(i).getKey();
}
return new CombineFileSplit(paths.toArray(new Path[0]),
toLongArray(start), toLongArray(length), hosts);
}
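  // Illustrative example (not part of the original source), assuming the request is
  // satisfied from a single file: if the blocks backing a 100 MB split are hosted 60%
  // on "hostA" and 40% on "hostB", the weighting map accumulates roughly
  // hostA = 0.6 and hostB = 0.4, so with nLocs = 2 the returned CombineFileSplit
  // reports its locations as [hostA, hostB], dominant host first.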
private long[] toLongArray(final ArrayList<Long> sigh) {
final long[] ret = new long[sigh.size()];
for (int i = 0; i < ret.length; ++i) {
ret[i] = sigh.get(i);
}
return ret;
}
static final Comparator<Entry<String,Double>> hostRank =
new Comparator<Entry<String,Double>>() {
public int compare(Entry<String,Double> a, Entry<String,Double> b) {
final double va = a.getValue();
final double vb = b.getValue();
return va > vb ? -1 : va < vb ? 1 : 0;
}
};
}
| 5,194 | 36.107143 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import java.nio.channels.ClosedByInterruptException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.Statistics.JobStats;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobStatus;
/**
* Component accepting submitted, running {@link Statistics.JobStats} and
* responsible for monitoring jobs for success and failure. Once a job is
* submitted, it is polled for status until complete. If a job is complete,
 * it is reported to {@link Statistics}; otherwise the job is returned to the
 * queue and the monitoring thread sleeps for some duration before polling
 * again.
*
* {@link JobMonitor} can be configured to use multiple threads for polling
* the job statuses. Use {@link Gridmix#GRIDMIX_JOBMONITOR_THREADS} to specify
* the total number of monitoring threads.
*
* The duration for which a monitoring thread sleeps if the first job in the
* queue is running can also be configured. Use
* {@link Gridmix#GRIDMIX_JOBMONITOR_SLEEPTIME_MILLIS} to specify a custom
* value.
*/
class JobMonitor implements Gridmix.Component<JobStats> {
public static final Log LOG = LogFactory.getLog(JobMonitor.class);
private final Queue<JobStats> mJobs;
private ExecutorService executor;
private int numPollingThreads;
private final BlockingQueue<JobStats> runningJobs;
private final long pollDelayMillis;
private Statistics statistics;
private boolean graceful = false;
private boolean shutdown = false;
/**
* Create a JobMonitor that sleeps for the specified duration after
* polling a still-running job.
* @param pollDelay Delay after polling a running job
   * @param unit Time unit for pollDelay (rounded to milliseconds)
   * @param statistics statistics collector, listener for job completion
   * @param numPollingThreads number of threads used for polling job statuses
*/
public JobMonitor(int pollDelay, TimeUnit unit, Statistics statistics,
int numPollingThreads) {
executor = Executors.newCachedThreadPool();
this.numPollingThreads = numPollingThreads;
runningJobs = new LinkedBlockingQueue<JobStats>();
mJobs = new LinkedList<JobStats>();
this.pollDelayMillis = TimeUnit.MILLISECONDS.convert(pollDelay, unit);
this.statistics = statistics;
}
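  // Minimal usage sketch (illustrative, not part of the original source); the variable
  // names and values below are made up.
  //   JobMonitor monitor = new JobMonitor(5, TimeUnit.SECONDS, statistics, 2);
  //   monitor.start();                 // spawns 2 polling threads
  //   monitor.add(jobStats);           // hand over a submitted job for tracking
  //   monitor.shutdown();              // stop once all monitored jobs complete
  //   monitor.join(60 * 1000L);        // wait up to a minute for the pollers to exit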
/**
* Add a running job's status to the polling queue.
*/
public void add(JobStats job) throws InterruptedException {
runningJobs.put(job);
}
/**
   * Add the status of a job whose submission failed, so that the failure can
   * be communicated back to the serial submission policy.
* TODO: Cleaner solution for this problem
* @param job
*/
public void submissionFailed(JobStats job) {
String jobID = job.getJob().getConfiguration().get(Gridmix.ORIGINAL_JOB_ID);
LOG.info("Job submission failed notification for job " + jobID);
synchronized (statistics) {
this.statistics.add(job);
}
}
/**
* Temporary hook for recording job success.
*/
protected void onSuccess(Job job) {
LOG.info(job.getJobName() + " (" + job.getJobID() + ")" + " success");
}
/**
* Temporary hook for recording job failure.
*/
protected void onFailure(Job job) {
LOG.info(job.getJobName() + " (" + job.getJobID() + ")" + " failure");
}
/**
* If shutdown before all jobs have completed, any still-running jobs
* may be extracted from the component.
* @throws IllegalStateException If monitoring thread is still running.
* @return Any jobs submitted and not known to have completed.
*/
List<JobStats> getRemainingJobs() {
synchronized (mJobs) {
return new ArrayList<JobStats>(mJobs);
}
}
/**
* Monitoring thread pulling running jobs from the component and into
* a queue to be polled for status.
*/
private class MonitorThread extends Thread {
public MonitorThread(int i) {
super("GridmixJobMonitor-" + i);
}
@Override
public void run() {
boolean graceful;
boolean shutdown;
while (true) {
try {
synchronized (mJobs) {
graceful = JobMonitor.this.graceful;
shutdown = JobMonitor.this.shutdown;
runningJobs.drainTo(mJobs);
}
// shutdown conditions; either shutdown requested and all jobs
// have completed or abort requested and there are recently
// submitted jobs not in the monitored set
if (shutdown) {
if (!graceful) {
while (!runningJobs.isEmpty()) {
synchronized (mJobs) {
runningJobs.drainTo(mJobs);
}
}
break;
}
synchronized (mJobs) {
if (graceful && mJobs.isEmpty()) {
break;
}
}
}
JobStats jobStats = null;
synchronized (mJobs) {
jobStats = mJobs.poll();
}
while (jobStats != null) {
Job job = jobStats.getJob();
try {
// get the job status
long start = System.currentTimeMillis();
JobStatus status = job.getStatus(); // cache the job status
long end = System.currentTimeMillis();
if (LOG.isDebugEnabled()) {
LOG.debug("Status polling for job " + job.getJobID() + " took "
+ (end-start) + "ms.");
}
// update the job progress
jobStats.updateJobStatus(status);
// if the job is complete, let others know
if (status.isJobComplete()) {
if (status.getState() == JobStatus.State.SUCCEEDED) {
onSuccess(job);
} else {
onFailure(job);
}
synchronized (statistics) {
statistics.add(jobStats);
}
} else {
// add the running job back and break
synchronized (mJobs) {
if (!mJobs.offer(jobStats)) {
LOG.error("Lost job " + (null == job.getJobName()
? "<unknown>" : job.getJobName())); // should never
// happen
}
}
break;
}
} catch (IOException e) {
if (e.getCause() instanceof ClosedByInterruptException) {
// Job doesn't throw InterruptedException, but RPC socket layer
// is blocking and may throw a wrapped Exception if this thread
// is interrupted. Since the lower level cleared the flag,
// reset it here
Thread.currentThread().interrupt();
} else {
LOG.warn("Lost job " + (null == job.getJobName()
? "<unknown>" : job.getJobName()), e);
synchronized (statistics) {
statistics.add(jobStats);
}
}
}
// get the next job
synchronized (mJobs) {
jobStats = mJobs.poll();
}
}
// sleep for a while before checking again
try {
TimeUnit.MILLISECONDS.sleep(pollDelayMillis);
} catch (InterruptedException e) {
shutdown = true;
continue;
}
} catch (Throwable e) {
LOG.warn("Unexpected exception: ", e);
}
}
}
}
/**
* Start the internal, monitoring thread.
*/
public void start() {
for (int i = 0; i < numPollingThreads; ++i) {
executor.execute(new MonitorThread(i));
}
}
/**
* Wait for the monitor to halt, assuming shutdown or abort have been
   * called. Note that, since submission may be sporadic, this will hang
* if no form of shutdown has been requested.
*/
public void join(long millis) throws InterruptedException {
executor.awaitTermination(millis, TimeUnit.MILLISECONDS);
}
/**
* Drain all submitted jobs to a queue and stop the monitoring thread.
* Upstream submitter is assumed dead.
*/
public void abort() {
synchronized (mJobs) {
graceful = false;
shutdown = true;
}
executor.shutdown();
}
/**
* When all monitored jobs have completed, stop the monitoring thread.
* Upstream submitter is assumed dead.
*/
public void shutdown() {
synchronized (mJobs) {
graceful = true;
shutdown = true;
}
executor.shutdown();
}
}
| 9,807 | 32.247458 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GenerateData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.OutputStream;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Utils;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
// TODO can replace with form of GridmixJob
class GenerateData extends GridmixJob {
/**
* Total bytes to write.
*/
public static final String GRIDMIX_GEN_BYTES = "gridmix.gen.bytes";
/**
* Maximum size per file written.
*/
public static final String GRIDMIX_GEN_CHUNK = "gridmix.gen.bytes.per.file";
/**
* Size of writes to output file.
*/
public static final String GRIDMIX_VAL_BYTES = "gendata.val.bytes";
/**
* Status reporting interval, in megabytes.
*/
public static final String GRIDMIX_GEN_INTERVAL = "gendata.interval.mb";
/**
* Blocksize of generated data.
*/
public static final String GRIDMIX_GEN_BLOCKSIZE = "gridmix.gen.blocksize";
/**
* Replication of generated data.
*/
public static final String GRIDMIX_GEN_REPLICATION = "gridmix.gen.replicas";
static final String JOB_NAME = "GRIDMIX_GENERATE_INPUT_DATA";
public GenerateData(Configuration conf, Path outdir, long genbytes)
throws IOException {
super(conf, 0L, JOB_NAME);
job.getConfiguration().setLong(GRIDMIX_GEN_BYTES, genbytes);
FileOutputFormat.setOutputPath(job, outdir);
}
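  // Sketch of how a driver might combine the knobs above before submitting
  // the generation job; the sizes, output path and call sequence here are
  // illustrative assumptions, not taken from the Gridmix driver itself:
  //
  //   Configuration conf = new Configuration();
  //   conf.setLong(GRIDMIX_GEN_CHUNK, 512L << 20);   // cap each file at 512 MB
  //   conf.setInt(GRIDMIX_VAL_BYTES, 1 << 20);       // 1 MB per write
  //   conf.setInt(GRIDMIX_GEN_REPLICATION, 2);
  //   GenerateData gen =
  //       new GenerateData(conf, new Path("/tmp/gridmix-input"), 10L << 30);
  //   Job job = gen.call();                          // submits the MR job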
/**
* Represents the input data characteristics.
*/
static class DataStatistics {
private long dataSize;
private long numFiles;
private boolean isDataCompressed;
DataStatistics(long dataSize, long numFiles, boolean isCompressed) {
this.dataSize = dataSize;
this.numFiles = numFiles;
this.isDataCompressed = isCompressed;
}
long getDataSize() {
return dataSize;
}
long getNumFiles() {
return numFiles;
}
boolean isDataCompressed() {
return isDataCompressed;
}
}
/**
* Publish the data statistics.
*/
static DataStatistics publishDataStatistics(Path inputDir, long genBytes,
Configuration conf)
throws IOException {
if (CompressionEmulationUtil.isCompressionEmulationEnabled(conf)) {
return CompressionEmulationUtil.publishCompressedDataStatistics(inputDir,
conf, genBytes);
} else {
return publishPlainDataStatistics(conf, inputDir);
}
}
static DataStatistics publishPlainDataStatistics(Configuration conf,
Path inputDir)
throws IOException {
FileSystem fs = inputDir.getFileSystem(conf);
// obtain input data file statuses
long dataSize = 0;
long fileCount = 0;
RemoteIterator<LocatedFileStatus> iter = fs.listFiles(inputDir, true);
PathFilter filter = new Utils.OutputFileUtils.OutputFilesFilter();
while (iter.hasNext()) {
LocatedFileStatus lStatus = iter.next();
if (filter.accept(lStatus.getPath())) {
dataSize += lStatus.getLen();
++fileCount;
}
}
// publish the plain data statistics
LOG.info("Total size of input data : "
+ StringUtils.humanReadableInt(dataSize));
LOG.info("Total number of input data files : " + fileCount);
return new DataStatistics(dataSize, fileCount, false);
}
@Override
public Job call() throws IOException, InterruptedException,
ClassNotFoundException {
UserGroupInformation ugi = UserGroupInformation.getLoginUser();
ugi.doAs( new PrivilegedExceptionAction <Job>() {
public Job run() throws IOException, ClassNotFoundException,
InterruptedException {
// check if compression emulation is enabled
if (CompressionEmulationUtil
.isCompressionEmulationEnabled(job.getConfiguration())) {
CompressionEmulationUtil.configure(job);
} else {
configureRandomBytesDataGenerator();
}
job.submit();
return job;
}
private void configureRandomBytesDataGenerator() {
job.setMapperClass(GenDataMapper.class);
job.setNumReduceTasks(0);
job.setMapOutputKeyClass(NullWritable.class);
job.setMapOutputValueClass(BytesWritable.class);
job.setInputFormatClass(GenDataFormat.class);
job.setOutputFormatClass(RawBytesOutputFormat.class);
job.setJarByClass(GenerateData.class);
try {
FileInputFormat.addInputPath(job, new Path("ignored"));
} catch (IOException e) {
LOG.error("Error while adding input path ", e);
}
}
});
return job;
}
@Override
protected boolean canEmulateCompression() {
return false;
}
public static class GenDataMapper
extends Mapper<NullWritable,LongWritable,NullWritable,BytesWritable> {
private BytesWritable val;
private final Random r = new Random();
@Override
protected void setup(Context context)
throws IOException, InterruptedException {
val = new BytesWritable(new byte[
context.getConfiguration().getInt(GRIDMIX_VAL_BYTES, 1024 * 1024)]);
}
@Override
public void map(NullWritable key, LongWritable value, Context context)
throws IOException, InterruptedException {
for (long bytes = value.get(); bytes > 0; bytes -= val.getLength()) {
r.nextBytes(val.getBytes());
val.setSize((int)Math.min(val.getLength(), bytes));
context.write(key, val);
}
}
}
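  // Worked example of the write loop in GenDataMapper.map() (illustrative
  // numbers): with the default 1 MB value buffer and an incoming value of
  // 2.5 MB, the mapper emits records of 1 MB, 1 MB and 0.5 MB; the final
  // record is trimmed by val.setSize(...), so no padding bytes are written.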
static class GenDataFormat extends InputFormat<NullWritable,LongWritable> {
@Override
public List<InputSplit> getSplits(JobContext jobCtxt) throws IOException {
final JobClient client =
new JobClient(new JobConf(jobCtxt.getConfiguration()));
ClusterStatus stat = client.getClusterStatus(true);
final long toGen =
jobCtxt.getConfiguration().getLong(GRIDMIX_GEN_BYTES, -1);
if (toGen < 0) {
throw new IOException("Invalid/missing generation bytes: " + toGen);
}
final int nTrackers = stat.getTaskTrackers();
final long bytesPerTracker = toGen / nTrackers;
final ArrayList<InputSplit> splits = new ArrayList<InputSplit>(nTrackers);
final Pattern trackerPattern = Pattern.compile("tracker_([^:]*):.*");
final Matcher m = trackerPattern.matcher("");
for (String tracker : stat.getActiveTrackerNames()) {
m.reset(tracker);
if (!m.find()) {
System.err.println("Skipping node: " + tracker);
continue;
}
final String name = m.group(1);
splits.add(new GenSplit(bytesPerTracker, new String[] { name }));
}
return splits;
}
@Override
public RecordReader<NullWritable,LongWritable> createRecordReader(
InputSplit split, final TaskAttemptContext taskContext)
throws IOException {
return new RecordReader<NullWritable,LongWritable>() {
long written = 0L;
long write = 0L;
long RINTERVAL;
long toWrite;
final NullWritable key = NullWritable.get();
final LongWritable val = new LongWritable();
@Override
public void initialize(InputSplit split, TaskAttemptContext ctxt)
throws IOException, InterruptedException {
toWrite = split.getLength();
RINTERVAL = ctxt.getConfiguration().getInt(
GRIDMIX_GEN_INTERVAL, 10) << 20;
}
@Override
public boolean nextKeyValue() throws IOException {
written += write;
write = Math.min(toWrite - written, RINTERVAL);
val.set(write);
return written < toWrite;
}
@Override
public float getProgress() throws IOException {
return written / ((float)toWrite);
}
@Override
public NullWritable getCurrentKey() { return key; }
@Override
public LongWritable getCurrentValue() { return val; }
@Override
public void close() throws IOException {
taskContext.setStatus("Wrote " + toWrite);
}
};
}
}
static class GenSplit extends InputSplit implements Writable {
private long bytes;
private int nLoc;
private String[] locations;
public GenSplit() { }
public GenSplit(long bytes, String[] locations) {
this(bytes, locations.length, locations);
}
public GenSplit(long bytes, int nLoc, String[] locations) {
this.bytes = bytes;
this.nLoc = nLoc;
this.locations = Arrays.copyOf(locations, nLoc);
}
@Override
public long getLength() {
return bytes;
}
@Override
public String[] getLocations() {
return locations;
}
@Override
public void readFields(DataInput in) throws IOException {
bytes = in.readLong();
nLoc = in.readInt();
if (null == locations || locations.length < nLoc) {
locations = new String[nLoc];
}
for (int i = 0; i < nLoc; ++i) {
locations[i] = Text.readString(in);
}
}
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(bytes);
out.writeInt(nLoc);
for (int i = 0; i < nLoc; ++i) {
Text.writeString(out, locations[i]);
}
}
}
static class RawBytesOutputFormat
extends FileOutputFormat<NullWritable,BytesWritable> {
@Override
public RecordWriter<NullWritable,BytesWritable> getRecordWriter(
TaskAttemptContext job) throws IOException {
return new ChunkWriter(getDefaultWorkFile(job, ""),
job.getConfiguration());
}
static class ChunkWriter extends RecordWriter<NullWritable,BytesWritable> {
private final Path outDir;
private final FileSystem fs;
private final int blocksize;
private final short replicas;
private final FsPermission genPerms = new FsPermission((short) 0777);
private final long maxFileBytes;
private long accFileBytes = 0L;
private long fileIdx = -1L;
private OutputStream fileOut = null;
public ChunkWriter(Path outDir, Configuration conf) throws IOException {
this.outDir = outDir;
fs = outDir.getFileSystem(conf);
blocksize = conf.getInt(GRIDMIX_GEN_BLOCKSIZE, 1 << 28);
replicas = (short) conf.getInt(GRIDMIX_GEN_REPLICATION, 3);
maxFileBytes = conf.getLong(GRIDMIX_GEN_CHUNK, 1L << 30);
nextDestination();
}
private void nextDestination() throws IOException {
if (fileOut != null) {
fileOut.close();
}
fileOut = fs.create(new Path(outDir, "segment-" + (++fileIdx)),
genPerms, false, 64 * 1024, replicas,
blocksize, null);
accFileBytes = 0L;
}
@Override
public void write(NullWritable key, BytesWritable value)
throws IOException {
int written = 0;
final int total = value.getLength();
while (written < total) {
if (accFileBytes >= maxFileBytes) {
nextDestination();
}
final int write = (int)
Math.min(total - written, maxFileBytes - accFileBytes);
fileOut.write(value.getBytes(), written, write);
written += write;
accFileBytes += write;
}
}
@Override
public void close(TaskAttemptContext ctxt) throws IOException {
fileOut.close();
}
}
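    // Rollover example for ChunkWriter (illustrative sizes): with
    // gridmix.gen.bytes.per.file left at its 1 GB default, a 2.5 GB stream of
    // values yields segment-0 (1 GB), segment-1 (1 GB) and segment-2 (0.5 GB);
    // a value that straddles a boundary is split across two segments by the
    // while loop in write().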
}
}
| 13,779 | 32.365617 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.tools.rumen.JobStory;
import org.apache.hadoop.tools.rumen.JobStoryProducer;
import org.apache.hadoop.tools.rumen.Pre21JobHistoryConstants.Values;
import org.apache.hadoop.tools.rumen.TaskAttemptInfo;
import org.apache.hadoop.tools.rumen.TaskInfo;
import org.apache.hadoop.tools.rumen.ZombieJobProducer;
import org.apache.hadoop.tools.rumen.Pre21JobHistoryConstants;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import java.util.ArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Component reading job traces generated by Rumen. Each job in the trace is
* assigned a sequence number and given a submission time relative to the
* job that preceded it. Jobs are enqueued in the JobSubmitter provided at
* construction.
* @see org.apache.hadoop.tools.rumen.HadoopLogsAnalyzer
*/
abstract class JobFactory<T> implements Gridmix.Component<Void>,StatListener<T> {
public static final Log LOG = LogFactory.getLog(JobFactory.class);
protected final Path scratch;
protected final float rateFactor;
protected final Configuration conf;
protected final Thread rThread;
protected final AtomicInteger sequence;
protected final JobSubmitter submitter;
protected final CountDownLatch startFlag;
protected final UserResolver userResolver;
protected final JobCreator jobCreator;
protected volatile IOException error = null;
protected final JobStoryProducer jobProducer;
protected final ReentrantLock lock = new ReentrantLock(true);
protected int numJobsInTrace = 0;
/**
* Creating a new instance does not start the thread.
* @param submitter Component to which deserialized jobs are passed
* @param jobTrace Stream of job traces with which to construct a
* {@link org.apache.hadoop.tools.rumen.ZombieJobProducer}
* @param scratch Directory into which to write output from simulated jobs
* @param conf Config passed to all jobs to be submitted
* @param startFlag Latch released from main to start pipeline
* @throws java.io.IOException
*/
public JobFactory(JobSubmitter submitter, InputStream jobTrace,
Path scratch, Configuration conf, CountDownLatch startFlag,
UserResolver userResolver) throws IOException {
this(submitter, new ZombieJobProducer(jobTrace, null), scratch, conf,
startFlag, userResolver);
}
/**
* Constructor permitting JobStoryProducer to be mocked.
* @param submitter Component to which deserialized jobs are passed
* @param jobProducer Producer generating JobStory objects.
* @param scratch Directory into which to write output from simulated jobs
* @param conf Config passed to all jobs to be submitted
* @param startFlag Latch released from main to start pipeline
*/
protected JobFactory(JobSubmitter submitter, JobStoryProducer jobProducer,
Path scratch, Configuration conf, CountDownLatch startFlag,
UserResolver userResolver) {
sequence = new AtomicInteger(0);
this.scratch = scratch;
this.rateFactor = conf.getFloat(Gridmix.GRIDMIX_SUB_MUL, 1.0f);
this.jobProducer = jobProducer;
this.conf = new Configuration(conf);
this.submitter = submitter;
this.startFlag = startFlag;
this.rThread = createReaderThread();
if(LOG.isDebugEnabled()) {
LOG.debug(" The submission thread name is " + rThread.getName());
}
this.userResolver = userResolver;
this.jobCreator = JobCreator.getPolicy(conf, JobCreator.LOADJOB);
}
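  // Sketch of a minimal concrete factory, included only to show how
  // createReaderThread() is meant to plug in; the replay and serial factories
  // in this package are the authoritative implementations, and the class
  // below is a hypothetical illustration:
  //
  //   class OneShotJobFactory extends JobFactory<Statistics.ClusterStats> {
  //     OneShotJobFactory(JobSubmitter s, JobStoryProducer p, Path scratch,
  //         Configuration c, CountDownLatch flag, UserResolver r) {
  //       super(s, p, scratch, c, flag, r);
  //     }
  //     @Override protected Thread createReaderThread() {
  //       return new Thread("one-shot") {
  //         @Override public void run() {
  //           try {
  //             startFlag.await();                   // wait for the pipeline
  //             JobStory job = getNextJobFiltered(); // null at end of trace
  //             // ... wrap 'job' in a GridmixJob and pass it to 'submitter'
  //           } catch (IOException e) {
  //             error = e;
  //           } catch (InterruptedException e) {
  //             // exit quietly
  //           }
  //         }
  //       };
  //     }
  //     @Override public void update(Statistics.ClusterStats item) { }
  //   }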
static class MinTaskInfo extends TaskInfo {
public MinTaskInfo(TaskInfo info) {
super(info.getInputBytes(), info.getInputRecords(),
info.getOutputBytes(), info.getOutputRecords(),
info.getTaskMemory(), info.getResourceUsageMetrics());
}
public long getInputBytes() {
return Math.max(0, super.getInputBytes());
}
public int getInputRecords() {
return Math.max(0, super.getInputRecords());
}
public long getOutputBytes() {
return Math.max(0, super.getOutputBytes());
}
public int getOutputRecords() {
return Math.max(0, super.getOutputRecords());
}
public long getTaskMemory() {
return Math.max(0, super.getTaskMemory());
}
}
protected static class FilterJobStory implements JobStory {
protected final JobStory job;
public FilterJobStory(JobStory job) {
this.job = job;
}
public JobConf getJobConf() { return job.getJobConf(); }
public String getName() { return job.getName(); }
public JobID getJobID() { return job.getJobID(); }
public String getUser() { return job.getUser(); }
public long getSubmissionTime() { return job.getSubmissionTime(); }
public InputSplit[] getInputSplits() { return job.getInputSplits(); }
public int getNumberMaps() { return job.getNumberMaps(); }
public int getNumberReduces() { return job.getNumberReduces(); }
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
return job.getTaskInfo(taskType, taskNumber);
}
public TaskAttemptInfo getTaskAttemptInfo(TaskType taskType, int taskNumber,
int taskAttemptNumber) {
return job.getTaskAttemptInfo(taskType, taskNumber, taskAttemptNumber);
}
public TaskAttemptInfo getMapTaskAttemptInfoAdjusted(
int taskNumber, int taskAttemptNumber, int locality) {
return job.getMapTaskAttemptInfoAdjusted(
taskNumber, taskAttemptNumber, locality);
}
public Values getOutcome() {
return job.getOutcome();
}
public String getQueueName() {
return job.getQueueName();
}
}
protected abstract Thread createReaderThread() ;
  // gets the next job from the trace and does some bookkeeping
private JobStory getNextJobFromTrace() throws IOException {
JobStory story = jobProducer.getNextJob();
if (story != null) {
++numJobsInTrace;
}
return story;
}
protected JobStory getNextJobFiltered() throws IOException {
JobStory job = getNextJobFromTrace();
// filter out the following jobs
// - unsuccessful jobs
// - jobs with missing submit-time
// - reduce only jobs
// These jobs are not yet supported in Gridmix
while (job != null &&
(job.getOutcome() != Pre21JobHistoryConstants.Values.SUCCESS ||
job.getSubmissionTime() < 0 || job.getNumberMaps() == 0)) {
if (LOG.isDebugEnabled()) {
List<String> reason = new ArrayList<String>();
if (job.getOutcome() != Pre21JobHistoryConstants.Values.SUCCESS) {
reason.add("STATE (" + job.getOutcome().name() + ")");
}
if (job.getSubmissionTime() < 0) {
reason.add("SUBMISSION-TIME (" + job.getSubmissionTime() + ")");
}
if (job.getNumberMaps() == 0) {
reason.add("ZERO-MAPS-JOB");
}
// TODO This should never happen. Probably we missed something!
if (reason.size() == 0) {
reason.add("N/A");
}
LOG.debug("Ignoring job " + job.getJobID() + " from the input trace."
+ " Reason: " + StringUtils.join(reason, ","));
}
job = getNextJobFromTrace();
}
return null == job ? null : new FilterJobStory(job) {
@Override
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
TaskInfo info = this.job.getTaskInfo(taskType, taskNumber);
if (info != null) {
info = new MinTaskInfo(info);
} else {
info = new MinTaskInfo(new TaskInfo(0, 0, 0, 0, 0));
}
return info;
}
};
}
/**
* Obtain the error that caused the thread to exit unexpectedly.
*/
public IOException error() {
return error;
}
/**
* Add is disabled.
* @throws UnsupportedOperationException
*/
public void add(Void ignored) {
throw new UnsupportedOperationException(getClass().getName() +
" is at the start of the pipeline and accepts no events");
}
/**
* Start the reader thread, wait for latch if necessary.
*/
public void start() {
rThread.start();
}
/**
* Wait for the reader thread to exhaust the job trace.
*/
public void join(long millis) throws InterruptedException {
rThread.join(millis);
}
/**
* Interrupt the reader thread.
*/
public void shutdown() {
rThread.interrupt();
}
/**
* Interrupt the reader thread. This requires no special consideration, as
* the thread has no pending work queue.
*/
public void abort() {
// Currently no special work
rThread.interrupt();
}
}
| 9,925 | 34.833935 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import org.apache.commons.lang.time.FastDateFormat;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.mapred.gridmix.GenerateData.DataStatistics;
import org.apache.hadoop.mapred.gridmix.Statistics.JobStats;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.StringUtils;
/**
* Summarizes a {@link Gridmix} run. Statistics that are reported are
* <ul>
* <li>Total number of jobs in the input trace</li>
* <li>Trace signature</li>
* <li>Total number of jobs processed from the input trace</li>
* <li>Total number of jobs submitted</li>
* <li>Total number of successful and failed jobs</li>
* <li>Total number of map/reduce tasks launched</li>
* <li>Gridmix start & end time</li>
* <li>Total time for the Gridmix run (data-generation and simulation)</li>
* <li>Gridmix Configuration (i.e job-type, submission-type, resolver)</li>
* </ul>
*/
class ExecutionSummarizer implements StatListener<JobStats> {
static final Log LOG = LogFactory.getLog(ExecutionSummarizer.class);
private static final FastDateFormat UTIL = FastDateFormat.getInstance();
private int numJobsInInputTrace;
private int totalSuccessfulJobs;
private int totalFailedJobs;
private int totalLostJobs;
private int totalMapTasksLaunched;
private int totalReduceTasksLaunched;
private long totalSimulationTime;
private long totalRuntime;
private final String commandLineArgs;
private long startTime;
private long endTime;
private long simulationStartTime;
private String inputTraceLocation;
private String inputTraceSignature;
private String jobSubmissionPolicy;
private String resolver;
private DataStatistics dataStats;
private String expectedDataSize;
/**
* Basic constructor initialized with the runtime arguments.
*/
ExecutionSummarizer(String[] args) {
startTime = System.currentTimeMillis();
// flatten the args string and store it
commandLineArgs =
org.apache.commons.lang.StringUtils.join(args, ' ');
}
/**
* Default constructor.
*/
ExecutionSummarizer() {
startTime = System.currentTimeMillis();
commandLineArgs = Summarizer.NA;
}
void start(Configuration conf) {
simulationStartTime = System.currentTimeMillis();
}
private void processJobState(JobStats stats) {
Job job = stats.getJob();
try {
if (job.isSuccessful()) {
++totalSuccessfulJobs;
} else {
++totalFailedJobs;
}
} catch (Exception e) {
// this behavior is consistent with job-monitor which marks the job as
// complete (lost) if the status polling bails out
++totalLostJobs;
}
}
private void processJobTasks(JobStats stats) {
totalMapTasksLaunched += stats.getNoOfMaps();
totalReduceTasksLaunched += stats.getNoOfReds();
}
private void process(JobStats stats) {
// process the job run state
processJobState(stats);
// process the tasks information
processJobTasks(stats);
}
@Override
public void update(JobStats item) {
// process only if the simulation has started
if (simulationStartTime > 0) {
process(item);
totalSimulationTime =
System.currentTimeMillis() - getSimulationStartTime();
}
}
// Generates a signature for the trace file based on
// - filename
// - modification time
// - file length
// - owner
protected static String getTraceSignature(String input) throws IOException {
Path inputPath = new Path(input);
FileSystem fs = inputPath.getFileSystem(new Configuration());
FileStatus status = fs.getFileStatus(inputPath);
Path qPath = fs.makeQualified(status.getPath());
String traceID = status.getModificationTime() + qPath.toString()
+ status.getOwner() + status.getLen();
return MD5Hash.digest(traceID).toString();
}
@SuppressWarnings("unchecked")
void finalize(JobFactory factory, String inputPath, long dataSize,
UserResolver userResolver, DataStatistics stats,
Configuration conf)
throws IOException {
numJobsInInputTrace = factory.numJobsInTrace;
endTime = System.currentTimeMillis();
if ("-".equals(inputPath)) {
inputTraceLocation = Summarizer.NA;
inputTraceSignature = Summarizer.NA;
} else {
Path inputTracePath = new Path(inputPath);
FileSystem fs = inputTracePath.getFileSystem(conf);
inputTraceLocation = fs.makeQualified(inputTracePath).toString();
inputTraceSignature = getTraceSignature(inputPath);
}
jobSubmissionPolicy = Gridmix.getJobSubmissionPolicy(conf).name();
resolver = userResolver.getClass().getName();
if (dataSize > 0) {
expectedDataSize = StringUtils.humanReadableInt(dataSize);
} else {
expectedDataSize = Summarizer.NA;
}
dataStats = stats;
totalRuntime = System.currentTimeMillis() - getStartTime();
}
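  // Reporting lifecycle as seen from the driver (a sketch; the surrounding
  // objects -- factory, resolver, dataStats -- are assumed to exist):
  //
  //   ExecutionSummarizer es = new ExecutionSummarizer(args);
  //   es.start(conf);          // records the simulation start time
  //   es.update(jobStats);     // invoked once per completed (or lost) job
  //   es.finalize(factory, tracePath, genBytes, resolver, dataStats, conf);
  //   LOG.info(es.toString()); // emits the "Execution Summary" block below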
/**
* Summarizes the current {@link Gridmix} run.
*/
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("Execution Summary:-");
builder.append("\nInput trace: ").append(getInputTraceLocation());
builder.append("\nInput trace signature: ")
.append(getInputTraceSignature());
builder.append("\nTotal number of jobs in trace: ")
.append(getNumJobsInTrace());
builder.append("\nExpected input data size: ")
.append(getExpectedDataSize());
builder.append("\nInput data statistics: ")
.append(getInputDataStatistics());
builder.append("\nTotal number of jobs processed: ")
.append(getNumSubmittedJobs());
builder.append("\nTotal number of successful jobs: ")
.append(getNumSuccessfulJobs());
builder.append("\nTotal number of failed jobs: ")
.append(getNumFailedJobs());
builder.append("\nTotal number of lost jobs: ")
.append(getNumLostJobs());
builder.append("\nTotal number of map tasks launched: ")
.append(getNumMapTasksLaunched());
builder.append("\nTotal number of reduce task launched: ")
.append(getNumReduceTasksLaunched());
builder.append("\nGridmix start time: ")
.append(UTIL.format(getStartTime()));
builder.append("\nGridmix end time: ").append(UTIL.format(getEndTime()));
builder.append("\nGridmix simulation start time: ")
.append(UTIL.format(getStartTime()));
builder.append("\nGridmix runtime: ")
.append(StringUtils.formatTime(getRuntime()));
builder.append("\nTime spent in initialization (data-gen etc): ")
.append(StringUtils.formatTime(getInitTime()));
builder.append("\nTime spent in simulation: ")
.append(StringUtils.formatTime(getSimulationTime()));
builder.append("\nGridmix configuration parameters: ")
.append(getCommandLineArgsString());
builder.append("\nGridmix job submission policy: ")
.append(getJobSubmissionPolicy());
builder.append("\nGridmix resolver: ").append(getUserResolver());
builder.append("\n\n");
return builder.toString();
}
// Gets the stringified version of DataStatistics
static String stringifyDataStatistics(DataStatistics stats) {
if (stats != null) {
StringBuffer buffer = new StringBuffer();
String compressionStatus = stats.isDataCompressed()
? "Compressed"
: "Uncompressed";
buffer.append(compressionStatus).append(" input data size: ");
buffer.append(StringUtils.humanReadableInt(stats.getDataSize()));
buffer.append(", ");
buffer.append("Number of files: ").append(stats.getNumFiles());
return buffer.toString();
} else {
return Summarizer.NA;
}
}
// Getters
protected String getExpectedDataSize() {
return expectedDataSize;
}
protected String getUserResolver() {
return resolver;
}
protected String getInputDataStatistics() {
return stringifyDataStatistics(dataStats);
}
protected String getInputTraceSignature() {
return inputTraceSignature;
}
protected String getInputTraceLocation() {
return inputTraceLocation;
}
protected int getNumJobsInTrace() {
return numJobsInInputTrace;
}
protected int getNumSuccessfulJobs() {
return totalSuccessfulJobs;
}
protected int getNumFailedJobs() {
return totalFailedJobs;
}
protected int getNumLostJobs() {
return totalLostJobs;
}
protected int getNumSubmittedJobs() {
return totalSuccessfulJobs + totalFailedJobs + totalLostJobs;
}
protected int getNumMapTasksLaunched() {
return totalMapTasksLaunched;
}
protected int getNumReduceTasksLaunched() {
return totalReduceTasksLaunched;
}
protected long getStartTime() {
return startTime;
}
protected long getEndTime() {
return endTime;
}
protected long getInitTime() {
return simulationStartTime - startTime;
}
protected long getSimulationStartTime() {
return simulationStartTime;
}
protected long getSimulationTime() {
return totalSimulationTime;
}
protected long getRuntime() {
return totalRuntime;
}
protected String getCommandLineArgsString() {
return commandLineArgs;
}
protected String getJobSubmissionPolicy() {
return jobSubmissionPolicy;
}
}
| 10,550 | 31.971875 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
/**
 * A random text generator. The words are simply sequences of alphabetic
 * characters.
*/
class RandomTextDataGenerator {
static final Log LOG = LogFactory.getLog(RandomTextDataGenerator.class);
/**
* Configuration key for random text data generator's list size.
*/
static final String GRIDMIX_DATAGEN_RANDOMTEXT_LISTSIZE =
"gridmix.datagenerator.randomtext.listsize";
/**
* Configuration key for random text data generator's word size.
*/
static final String GRIDMIX_DATAGEN_RANDOMTEXT_WORDSIZE =
"gridmix.datagenerator.randomtext.wordsize";
/**
* Default random text data generator's list size.
*/
static final int DEFAULT_LIST_SIZE = 200;
/**
* Default random text data generator's word size.
*/
static final int DEFAULT_WORD_SIZE = 10;
/**
* Default random text data generator's seed.
*/
static final long DEFAULT_SEED = 0L;
/**
* A list of random words
*/
private String[] words;
private Random random;
/**
* Constructor for {@link RandomTextDataGenerator} with default seed.
* @param size the total number of words to consider.
* @param wordSize Size of each word
*/
RandomTextDataGenerator(int size, int wordSize) {
this(size, DEFAULT_SEED , wordSize);
}
/**
* Constructor for {@link RandomTextDataGenerator}.
* @param size the total number of words to consider.
* @param seed Random number generator seed for repeatability
* @param wordSize Size of each word
*/
RandomTextDataGenerator(int size, Long seed, int wordSize) {
random = new Random(seed);
words = new String[size];
    //TODO replace the default with the actual stats
    //TODO do we need varied-size words?
for (int i = 0; i < size; ++i) {
words[i] =
RandomStringUtils.random(wordSize, 0, 0, true, false, null, random);
}
}
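  // Usage sketch: a fixed seed makes the word list reproducible across task
  // attempts, which is the point of DEFAULT_SEED. The calls below use only
  // members defined in this class:
  //
  //   RandomTextDataGenerator dict =
  //       new RandomTextDataGenerator(DEFAULT_LIST_SIZE, 0L, DEFAULT_WORD_SIZE);
  //   String word = dict.getRandomWord();  // one of the 200 generated words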
/**
* Get the configured random text data generator's list size.
*/
static int getRandomTextDataGeneratorListSize(Configuration conf) {
return conf.getInt(GRIDMIX_DATAGEN_RANDOMTEXT_LISTSIZE, DEFAULT_LIST_SIZE);
}
/**
* Set the random text data generator's list size.
*/
static void setRandomTextDataGeneratorListSize(Configuration conf,
int listSize) {
if (LOG.isDebugEnabled()) {
LOG.debug("Random text data generator is configured to use a dictionary "
+ " with " + listSize + " words");
}
conf.setInt(GRIDMIX_DATAGEN_RANDOMTEXT_LISTSIZE, listSize);
}
/**
* Get the configured random text data generator word size.
*/
static int getRandomTextDataGeneratorWordSize(Configuration conf) {
return conf.getInt(GRIDMIX_DATAGEN_RANDOMTEXT_WORDSIZE, DEFAULT_WORD_SIZE);
}
/**
* Set the random text data generator word size.
*/
static void setRandomTextDataGeneratorWordSize(Configuration conf,
int wordSize) {
if (LOG.isDebugEnabled()) {
LOG.debug("Random text data generator is configured to use a dictionary "
+ " with words of length " + wordSize);
}
conf.setInt(GRIDMIX_DATAGEN_RANDOMTEXT_WORDSIZE, wordSize);
}
/**
* Returns a randomly selected word from a list of random words.
*/
String getRandomWord() {
int index = random.nextInt(words.length);
return words[index];
}
/**
* This is mainly for testing.
*/
List<String> getRandomWords() {
return Arrays.asList(words);
}
}
| 4,621 | 30.22973 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReadRecordFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
/**
* For every record consumed, read key + val bytes from the stream provided.
*/
class ReadRecordFactory extends RecordFactory {
/**
* Size of internal, scratch buffer to read from internal stream.
*/
public static final String GRIDMIX_READ_BUF_SIZE = "gridmix.read.buffer.size";
private final byte[] buf;
private final InputStream src;
private final RecordFactory factory;
/**
* @param targetBytes Expected byte count.
* @param targetRecords Expected record count.
* @param src Stream to read bytes.
* @param conf Used to establish read buffer size. @see #GRIDMIX_READ_BUF_SIZE
*/
public ReadRecordFactory(long targetBytes, long targetRecords,
InputStream src, Configuration conf) {
this(new AvgRecordFactory(targetBytes, targetRecords, conf), src, conf);
}
/**
* @param factory Factory to draw record sizes.
* @param src Stream to read bytes.
* @param conf Used to establish read buffer size. @see #GRIDMIX_READ_BUF_SIZE
*/
public ReadRecordFactory(RecordFactory factory, InputStream src,
Configuration conf) {
this.src = src;
this.factory = factory;
buf = new byte[conf.getInt(GRIDMIX_READ_BUF_SIZE, 64 * 1024)];
}
@Override
public boolean next(GridmixKey key, GridmixRecord val) throws IOException {
if (!factory.next(key, val)) {
return false;
}
for (int len = (null == key ? 0 : key.getSize()) + val.getSize();
len > 0; len -= buf.length) {
IOUtils.readFully(src, buf, 0, Math.min(buf.length, len));
}
return true;
}
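  // The loop above only discards bytes: for a record whose key plus value
  // size is 150 KB, the default 64 KB buffer issues reads of 64 KB, 64 KB and
  // 22 KB against 'src', so the synthetic task pays the read cost of the
  // record without retaining its contents (the numbers are illustrative).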
@Override
public float getProgress() throws IOException {
return factory.getProgress();
}
@Override
public void close() throws IOException {
IOUtils.cleanup(null, src);
factory.close();
}
}
| 2,768 | 31.197674 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobSubmitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import java.nio.channels.ClosedByInterruptException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.Semaphore;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.Statistics.JobStats;
/**
* Component accepting deserialized job traces, computing split data, and
* submitting to the cluster on deadline. Each job added from an upstream
* factory must be submitted to the cluster by the deadline recorded on it.
* Once submitted, jobs must be added to a downstream component for
* monitoring.
*/
class JobSubmitter implements Gridmix.Component<GridmixJob> {
public static final Log LOG = LogFactory.getLog(JobSubmitter.class);
private final Semaphore sem;
private final Statistics statistics;
private final FilePool inputDir;
private final JobMonitor monitor;
private final ExecutorService sched;
private volatile boolean shutdown = false;
private final int queueDepth;
/**
* Initialize the submission component with downstream monitor and pool of
* files from which split data may be read.
* @param monitor Monitor component to which jobs should be passed
* @param threads Number of submission threads
* See {@link Gridmix#GRIDMIX_SUB_THR}.
* @param queueDepth Max depth of pending work queue
* See {@link Gridmix#GRIDMIX_QUE_DEP}.
* @param inputDir Set of files from which split data may be mined for
* synthetic jobs.
   * @param statistics Statistics component to which job stats are reported
*/
public JobSubmitter(JobMonitor monitor, int threads, int queueDepth,
FilePool inputDir, Statistics statistics) {
this.queueDepth = queueDepth;
sem = new Semaphore(queueDepth);
sched = new ThreadPoolExecutor(threads, threads, 0L,
TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
this.inputDir = inputDir;
this.monitor = monitor;
this.statistics = statistics;
}
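  // Pipeline sketch (the thread count, queue depth and timeout are
  // illustrative; the collaborating objects are assumed to exist):
  //
  //   JobSubmitter submitter =
  //       new JobSubmitter(monitor, 4, 100, filePool, statistics);
  //   submitter.refreshFilePool();  // scan split-source files once up front
  //   submitter.add(gridmixJob);    // called by the upstream JobFactory
  //   submitter.shutdown();         // finish queued work, accept no more
  //   submitter.join(60 * 1000);    // wait for pending submissions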
/**
* Runnable wrapping a job to be submitted to the cluster.
*/
private class SubmitTask implements Runnable {
final GridmixJob job;
public SubmitTask(GridmixJob job) {
this.job = job;
}
public void run() {
JobStats stats =
Statistics.generateJobStats(job.getJob(), job.getJobDesc());
try {
// pre-compute split information
try {
long start = System.currentTimeMillis();
job.buildSplits(inputDir);
long end = System.currentTimeMillis();
LOG.info("[JobSubmitter] Time taken to build splits for job "
+ job.getJob().getJobID() + ": " + (end - start) + " ms.");
} catch (IOException e) {
LOG.warn("Failed to submit " + job.getJob().getJobName() + " as "
+ job.getUgi(), e);
monitor.submissionFailed(stats);
return;
} catch (Exception e) {
LOG.warn("Failed to submit " + job.getJob().getJobName() + " as "
+ job.getUgi(), e);
monitor.submissionFailed(stats);
return;
}
// Sleep until deadline
long nsDelay = job.getDelay(TimeUnit.NANOSECONDS);
while (nsDelay > 0) {
TimeUnit.NANOSECONDS.sleep(nsDelay);
nsDelay = job.getDelay(TimeUnit.NANOSECONDS);
}
try {
// submit job
long start = System.currentTimeMillis();
job.call();
long end = System.currentTimeMillis();
LOG.info("[JobSubmitter] Time taken to submit the job "
+ job.getJob().getJobID() + ": " + (end - start) + " ms.");
// mark it as submitted
job.setSubmitted();
// add to the monitor
monitor.add(stats);
// add to the statistics
statistics.addJobStats(stats);
if (LOG.isDebugEnabled()) {
String jobID =
job.getJob().getConfiguration().get(Gridmix.ORIGINAL_JOB_ID);
LOG.debug("Original job '" + jobID + "' is being simulated as '"
+ job.getJob().getJobID() + "'");
LOG.debug("SUBMIT " + job + "@" + System.currentTimeMillis()
+ " (" + job.getJob().getJobID() + ")");
}
} catch (IOException e) {
LOG.warn("Failed to submit " + job.getJob().getJobName() + " as "
+ job.getUgi(), e);
if (e.getCause() instanceof ClosedByInterruptException) {
throw new InterruptedException("Failed to submit " +
job.getJob().getJobName());
}
monitor.submissionFailed(stats);
} catch (ClassNotFoundException e) {
LOG.warn("Failed to submit " + job.getJob().getJobName(), e);
monitor.submissionFailed(stats);
}
} catch (InterruptedException e) {
        // abort execution, remove splits if necessary
// TODO release ThdLoc
GridmixJob.pullDescription(job.id());
Thread.currentThread().interrupt();
monitor.submissionFailed(stats);
} catch(Exception e) {
        // The job was not submitted due to some exception.
LOG.info(" Job " + job.getJob().getJobID() + " submission failed " , e);
monitor.submissionFailed(stats);
} finally {
sem.release();
}
}
}
/**
* Enqueue the job to be submitted per the deadline associated with it.
*/
public void add(final GridmixJob job) throws InterruptedException {
final boolean addToQueue = !shutdown;
if (addToQueue) {
final SubmitTask task = new SubmitTask(job);
LOG.info("Total number of queued jobs: "
+ (queueDepth - sem.availablePermits()));
sem.acquire();
try {
sched.execute(task);
} catch (RejectedExecutionException e) {
sem.release();
}
}
}
/**
* (Re)scan the set of input files from which splits are derived.
* @throws java.io.IOException
*/
public void refreshFilePool() throws IOException {
inputDir.refresh();
}
/**
* Does nothing, as the threadpool is already initialized and waiting for
* work from the upstream factory.
*/
public void start() { }
/**
* Continue running until all queued jobs have been submitted to the
* cluster.
*/
public void join(long millis) throws InterruptedException {
if (!shutdown) {
throw new IllegalStateException("Cannot wait for active submit thread");
}
sched.awaitTermination(millis, TimeUnit.MILLISECONDS);
}
/**
* Finish all jobs pending submission, but do not accept new work.
*/
public void shutdown() {
// complete pending tasks, but accept no new tasks
shutdown = true;
sched.shutdown();
}
/**
* Discard pending work, including precomputed work waiting to be
* submitted.
*/
public void abort() {
//pendingJobs.clear();
shutdown = true;
sched.shutdownNow();
}
}
| 7,994 | 34.376106 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.tools.rumen.JobStory;
import org.apache.hadoop.tools.rumen.JobStoryProducer;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
class ReplayJobFactory extends JobFactory<Statistics.ClusterStats> {
public static final Log LOG = LogFactory.getLog(ReplayJobFactory.class);
/**
* Creating a new instance does not start the thread.
*
* @param submitter Component to which deserialized jobs are passed
* @param jobProducer Job story producer
* {@link org.apache.hadoop.tools.rumen.ZombieJobProducer}
* @param scratch Directory into which to write output from simulated jobs
* @param conf Config passed to all jobs to be submitted
* @param startFlag Latch released from main to start pipeline
   * @param resolver Resolver mapping trace users to target users on the
   *                 test cluster
* @throws java.io.IOException
*/
public ReplayJobFactory(
JobSubmitter submitter, JobStoryProducer jobProducer, Path scratch,
Configuration conf, CountDownLatch startFlag, UserResolver resolver)
throws IOException {
super(submitter, jobProducer, scratch, conf, startFlag, resolver);
}
@Override
public Thread createReaderThread() {
return new ReplayReaderThread("ReplayJobFactory");
}
  /**
   * @param item cluster statistics update; ignored in replay mode.
   */
public void update(Statistics.ClusterStats item) {
}
private class ReplayReaderThread extends Thread {
public ReplayReaderThread(String threadName) {
super(threadName);
}
public void run() {
try {
startFlag.await();
if (Thread.currentThread().isInterrupted()) {
return;
}
final long initTime = TimeUnit.MILLISECONDS.convert(
System.nanoTime(), TimeUnit.NANOSECONDS);
LOG.info("START REPLAY @ " + initTime);
long first = -1;
long last = -1;
while (!Thread.currentThread().isInterrupted()) {
try {
final JobStory job = getNextJobFiltered();
if (null == job) {
return;
}
if (first < 0) {
first = job.getSubmissionTime();
}
final long current = job.getSubmissionTime();
if (current < last) {
LOG.warn("Job " + job.getJobID() + " out of order");
continue;
}
last = current;
submitter.add(
jobCreator.createGridmixJob(
conf, initTime + Math.round(rateFactor * (current - first)),
job, scratch,
userResolver.getTargetUgi(
UserGroupInformation.createRemoteUser(job.getUser())),
sequence.getAndIncrement()));
} catch (IOException e) {
error = e;
return;
}
}
} catch (InterruptedException e) {
// exit thread; ignore any jobs remaining in the trace
} finally {
IOUtils.cleanup(null, jobProducer);
}
}
}
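  // Timing example for the schedule computed above: with rateFactor
  // (Gridmix.GRIDMIX_SUB_MUL) set to 0.5, a trace job submitted 60 s after
  // the first job in the trace is scheduled at initTime + 0.5 * 60,000 ms,
  // i.e. 30 s after replay starts; a multiplier below 1 therefore compresses
  // the original inter-arrival gaps, and one above 1 stretches them.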
/**
* Start the reader thread, wait for latch if necessary.
*/
@Override
public void start() {
this.rThread.start();
}
}
| 4,267 | 32.34375 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.tools.rumen.JobStory;
import org.apache.hadoop.tools.rumen.JobStoryProducer;
import org.apache.hadoop.mapred.gridmix.Statistics.JobStats;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.locks.Condition;
public class SerialJobFactory extends JobFactory<JobStats> {
public static final Log LOG = LogFactory.getLog(SerialJobFactory.class);
private final Condition jobCompleted = lock.newCondition();
/**
* Creating a new instance does not start the thread.
*
* @param submitter Component to which deserialized jobs are passed
* @param jobProducer Job story producer
* {@link org.apache.hadoop.tools.rumen.ZombieJobProducer}
* @param scratch Directory into which to write output from simulated jobs
* @param conf Config passed to all jobs to be submitted
* @param startFlag Latch released from main to start pipeline
* @throws java.io.IOException
*/
public SerialJobFactory(
JobSubmitter submitter, JobStoryProducer jobProducer, Path scratch,
Configuration conf, CountDownLatch startFlag, UserResolver resolver)
throws IOException {
super(submitter, jobProducer, scratch, conf, startFlag, resolver);
}
@Override
public Thread createReaderThread() {
return new SerialReaderThread("SerialJobFactory");
}
private class SerialReaderThread extends Thread {
public SerialReaderThread(String threadName) {
super(threadName);
}
/**
     * SERIAL: in this mode the method waits for notification that the
     * previously submitted job has completed. The logic is simple:
     * ===
     * while(true) {
     *   wait till previousJob is completed.
     *   break;
     * }
     * submit newJob.
     * previousJob = newJob;
     * ===
*/
@Override
public void run() {
try {
startFlag.await();
if (Thread.currentThread().isInterrupted()) {
return;
}
LOG.info("START SERIAL @ " + System.currentTimeMillis());
GridmixJob prevJob;
while (!Thread.currentThread().isInterrupted()) {
final JobStory job;
try {
job = getNextJobFiltered();
if (null == job) {
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug(
"Serial mode submitting job " + job.getName());
}
prevJob = jobCreator.createGridmixJob(
conf, 0L, job, scratch,
userResolver.getTargetUgi(
UserGroupInformation.createRemoteUser(job.getUser())),
sequence.getAndIncrement());
lock.lock();
try {
LOG.info(" Submitted the job " + prevJob);
submitter.add(prevJob);
} finally {
lock.unlock();
}
} catch (IOException e) {
error = e;
            // If submission of the current job fails, record the error and stop.
return;
}
if (prevJob != null) {
//Wait till previous job submitted is completed.
lock.lock();
try {
while (true) {
try {
jobCompleted.await();
} catch (InterruptedException ie) {
LOG.error(
" Error in SerialJobFactory while waiting for job completion ",
ie);
return;
}
if (LOG.isDebugEnabled()) {
LOG.info(" job " + job.getName() + " completed ");
}
break;
}
} finally {
lock.unlock();
}
prevJob = null;
}
}
} catch (InterruptedException e) {
return;
} finally {
IOUtils.cleanup(null, jobProducer);
}
}
}
/**
   * SERIAL. Once notification of job completion arrives from the
   * StatsCollector, simply notify the waiting thread.
*
* @param item
*/
@Override
public void update(Statistics.JobStats item) {
    // Simply notify in the serial-submission case; we only care whether the
    // submitted job has completed.
lock.lock();
try {
jobCompleted.signalAll();
} finally {
lock.unlock();
}
}
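  // Handshake summary: the reader thread submits one job and then blocks on
  // jobCompleted.await(); when the statistics collector reports that job as
  // finished, it calls update(), whose signalAll() wakes the reader so the
  // next trace job can be submitted. At most one Gridmix job is therefore in
  // flight at any time in SERIAL mode.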
/**
* Start the reader thread, wait for latch if necessary.
*/
@Override
public void start() {
LOG.info(" Starting Serial submission ");
this.rThread.start();
}
  // needed for tests
void setDistCacheEmulator(DistributedCacheEmulator e) {
jobCreator.setDistCacheEmulator(e);
}
}
| 5,812 | 30.765027 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Summarizer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.gridmix.GenerateData.DataStatistics;
/**
* Summarizes various aspects of a {@link Gridmix} run.
*/
class Summarizer {
private ExecutionSummarizer executionSummarizer;
private ClusterSummarizer clusterSummarizer;
protected static final String NA = "N/A";
Summarizer() {
this(new String[]{NA});
}
Summarizer(String[] args) {
executionSummarizer = new ExecutionSummarizer(args);
clusterSummarizer = new ClusterSummarizer();
}
ExecutionSummarizer getExecutionSummarizer() {
return executionSummarizer;
}
ClusterSummarizer getClusterSummarizer() {
return clusterSummarizer;
}
void start(Configuration conf) {
executionSummarizer.start(conf);
clusterSummarizer.start(conf);
}
/**
* This finalizes the summarizer.
*/
@SuppressWarnings("unchecked")
void finalize(JobFactory factory, String path, long size,
UserResolver resolver, DataStatistics stats, Configuration conf)
throws IOException {
executionSummarizer.finalize(factory, path, size, resolver, stats, conf);
}
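  // Typical use from the Gridmix driver, sketched here; the exact call sites
  // and arguments live in Gridmix itself:
  //
  //   Summarizer summarizer = new Summarizer(args);
  //   summarizer.start(conf);                    // before the run begins
  //   // ... data generation and simulation ...
  //   summarizer.finalize(factory, tracePath, genBytes, resolver, stats, conf);
  //   System.out.println(summarizer.toString()); // execution + cluster summary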
/**
* Summarizes the current {@link Gridmix} run and the cluster used.
*/
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append(executionSummarizer.toString());
builder.append(clusterSummarizer.toString());
return builder.toString();
}
}
| 2,343 | 30.253333 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.lang.time.FastDateFormat;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.gridmix.Statistics.ClusterStats;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
/**
* Summarizes the Hadoop cluster used in this {@link Gridmix} run.
* Statistics that are reported are
* <ul>
* <li>Total number of active trackers in the cluster</li>
* <li>Total number of blacklisted trackers in the cluster</li>
* <li>Max map task capacity of the cluster</li>
* <li>Max reduce task capacity of the cluster</li>
* </ul>
*
* Apart from these statistics, {@link JobTracker} and {@link FileSystem}
* addresses are also recorded in the summary.
*/
class ClusterSummarizer implements StatListener<ClusterStats> {
static final Log LOG = LogFactory.getLog(ClusterSummarizer.class);
private int numBlacklistedTrackers;
private int numActiveTrackers;
private int maxMapTasks;
private int maxReduceTasks;
private String jobTrackerInfo = Summarizer.NA;
private String namenodeInfo = Summarizer.NA;
@Override
@SuppressWarnings("deprecation")
public void update(ClusterStats item) {
try {
numBlacklistedTrackers = item.getStatus().getBlacklistedTrackers();
numActiveTrackers = item.getStatus().getTaskTrackers();
maxMapTasks = item.getStatus().getMaxMapTasks();
maxReduceTasks = item.getStatus().getMaxReduceTasks();
} catch (Exception e) {
long time = System.currentTimeMillis();
LOG.info("Error in processing cluster status at "
+ FastDateFormat.getInstance().format(time));
}
}
/**
* Summarizes the cluster used for this {@link Gridmix} run.
*/
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("Cluster Summary:-");
builder.append("\nJobTracker: ").append(getJobTrackerInfo());
builder.append("\nFileSystem: ").append(getNamenodeInfo());
builder.append("\nNumber of blacklisted trackers: ")
.append(getNumBlacklistedTrackers());
builder.append("\nNumber of active trackers: ")
.append(getNumActiveTrackers());
builder.append("\nMax map task capacity: ")
.append(getMaxMapTasks());
builder.append("\nMax reduce task capacity: ").append(getMaxReduceTasks());
builder.append("\n\n");
return builder.toString();
}
void start(Configuration conf) {
jobTrackerInfo = conf.get(JTConfig.JT_IPC_ADDRESS);
namenodeInfo = conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
}
// Getters
protected int getNumBlacklistedTrackers() {
return numBlacklistedTrackers;
}
protected int getNumActiveTrackers() {
return numActiveTrackers;
}
protected int getMaxMapTasks() {
return maxMapTasks;
}
protected int getMaxReduceTasks() {
return maxReduceTasks;
}
protected String getJobTrackerInfo() {
return jobTrackerInfo;
}
protected String getNamenodeInfo() {
return namenodeInfo;
}
}
| 4,064 | 33.74359 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/UserResolver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Maps users in the trace to a set of valid target users on the test cluster.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface UserResolver {
/**
* Configure the user map given the URI and configuration. The resolver's
* contract will define how the resource will be interpreted, but the default
* will typically interpret the URI as a {@link org.apache.hadoop.fs.Path}
* listing target users.
* This method should be called only if {@link #needsTargetUsersList()}
* returns true.
* @param userdesc URI from which user information may be loaded per the
* subclass contract.
* @param conf The tool configuration.
* @return true if the resource provided was used in building the list of
* target users
*/
public boolean setTargetUsers(URI userdesc, Configuration conf)
throws IOException;
/**
* Map the given UGI to another per the subclass contract.
* @param ugi User information from the trace.
*/
public UserGroupInformation getTargetUgi(UserGroupInformation ugi);
/**
* Indicates whether this user resolver needs a list of target users to be
* provided.
*
* @return true if a list of target users is to be provided for this
* user resolver
*/
public boolean needsTargetUsersList();
}
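
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): a trivial resolver that
// maps every trace user to itself and therefore needs no target-user list.
// It only demonstrates the contract above; it is not one of the resolvers
// shipped with Gridmix.
// ---------------------------------------------------------------------------
class IdentityUserResolverSketch implements UserResolver {
  @Override
  public boolean setTargetUsers(URI userdesc, Configuration conf)
      throws IOException {
    return false;  // no external list of target users is consumed
  }
  @Override
  public UserGroupInformation getTargetUgi(UserGroupInformation ugi) {
    return ugi;    // identity mapping: reuse the trace user as-is
  }
  @Override
  public boolean needsTargetUsersList() {
    return false;
  }
}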
| 2,422 | 35.712121 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StatListener.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
/**
 * A listener notified when an updated statistics item becomes available.
 * @param <T> the type of statistics item delivered to the listener
 */
interface StatListener<T> {
  /**
   * Called when a new statistics item is available.
   * @param item the updated statistics item
   */
  void update(T item);
}
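
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): a listener that merely
// counts updates. ClusterSummarizer above is a real implementation of this
// interface; the class below only shows the shape of the contract.
// ---------------------------------------------------------------------------
class CountingStatListenerSketch<T> implements StatListener<T> {
  private long updates;
  @Override
  public void update(T item) {
    ++updates;   // a real listener would inspect the item here
  }
  long getUpdateCount() {
    return updates;
  }
}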
| 985 | 28.878788 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SubmitterUserResolver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* Resolves all UGIs to the submitting user.
*/
public class SubmitterUserResolver implements UserResolver {
public static final Log LOG = LogFactory.getLog(SubmitterUserResolver.class);
private UserGroupInformation ugi = null;
public SubmitterUserResolver() throws IOException {
LOG.info(" Current user resolver is SubmitterUserResolver ");
ugi = UserGroupInformation.getLoginUser();
}
public synchronized boolean setTargetUsers(URI userdesc, Configuration conf)
throws IOException {
return false;
}
public synchronized UserGroupInformation getTargetUgi(
UserGroupInformation ugi) {
return this.ugi;
}
/**
* {@inheritDoc}
* <p>
* Since {@link SubmitterUserResolver} returns the user name who is running
* gridmix, it doesn't need a target list of users.
*/
public boolean needsTargetUsersList() {
return false;
}
}
| 1,983 | 32.066667 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixJobSubmissionPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.rumen.JobStoryProducer;
import org.apache.hadoop.mapred.gridmix.Statistics.JobStats;
import org.apache.hadoop.mapred.gridmix.Statistics.ClusterStats;
import java.util.concurrent.CountDownLatch;
import java.io.IOException;
import org.apache.hadoop.util.StringUtils;
enum GridmixJobSubmissionPolicy {
REPLAY("REPLAY", 320000) {
@Override
public JobFactory<ClusterStats> createJobFactory(
JobSubmitter submitter, JobStoryProducer producer, Path scratchDir,
Configuration conf, CountDownLatch startFlag, UserResolver userResolver)
throws IOException {
return new ReplayJobFactory(
submitter, producer, scratchDir, conf, startFlag, userResolver);
}
},
STRESS("STRESS", 5000) {
@Override
public JobFactory<ClusterStats> createJobFactory(
JobSubmitter submitter, JobStoryProducer producer, Path scratchDir,
Configuration conf, CountDownLatch startFlag, UserResolver userResolver)
throws IOException {
return new StressJobFactory(
submitter, producer, scratchDir, conf, startFlag, userResolver);
}
},
SERIAL("SERIAL", 0) {
@Override
public JobFactory<JobStats> createJobFactory(
JobSubmitter submitter, JobStoryProducer producer, Path scratchDir,
Configuration conf, CountDownLatch startFlag, UserResolver userResolver)
throws IOException {
return new SerialJobFactory(
submitter, producer, scratchDir, conf, startFlag, userResolver);
}
};
public static final String JOB_SUBMISSION_POLICY =
"gridmix.job-submission.policy";
private final String name;
private final int pollingInterval;
GridmixJobSubmissionPolicy(String name, int pollingInterval) {
this.name = name;
this.pollingInterval = pollingInterval;
}
public abstract JobFactory createJobFactory(
JobSubmitter submitter, JobStoryProducer producer, Path scratchDir,
Configuration conf, CountDownLatch startFlag, UserResolver userResolver)
throws IOException;
public int getPollingInterval() {
return pollingInterval;
}
public static GridmixJobSubmissionPolicy getPolicy(
Configuration conf, GridmixJobSubmissionPolicy defaultPolicy) {
String policy = conf.get(JOB_SUBMISSION_POLICY, defaultPolicy.name());
return valueOf(StringUtils.toUpperCase(policy));
}
}
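
// ---------------------------------------------------------------------------
// Illustrative usage (not part of the original file): resolving the submission
// policy from a configuration. The "serial" value is an arbitrary example; the
// lookup is case-insensitive and falls back to the supplied default (here
// STRESS) when the key is unset.
// ---------------------------------------------------------------------------
class SubmissionPolicySketch {
  static GridmixJobSubmissionPolicy resolve() {
    Configuration conf = new Configuration();
    conf.set(GridmixJobSubmissionPolicy.JOB_SUBMISSION_POLICY, "serial");
    return GridmixJobSubmissionPolicy.getPolicy(
        conf, GridmixJobSubmissionPolicy.STRESS);
  }
}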
| 3,288 | 35.142857 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixRecord.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.EOFException;
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
class GridmixRecord implements WritableComparable<GridmixRecord> {
private static final int FIXED_BYTES = 1;
private int size = -1;
private long seed;
private final DataInputBuffer dib =
new DataInputBuffer();
private final DataOutputBuffer dob =
new DataOutputBuffer(Long.SIZE / Byte.SIZE);
private byte[] literal = dob.getData();
private boolean compressible = false;
private float compressionRatio =
CompressionEmulationUtil.DEFAULT_COMPRESSION_RATIO;
private RandomTextDataGenerator rtg = null;
GridmixRecord() {
this(1, 0L);
}
GridmixRecord(int size, long seed) {
this.seed = seed;
setSizeInternal(size);
}
public int getSize() {
return size;
}
public void setSize(int size) {
setSizeInternal(size);
}
void setCompressibility(boolean compressible, float ratio) {
this.compressible = compressible;
this.compressionRatio = ratio;
// Initialize the RandomTextDataGenerator once for every GridMix record
// Note that RandomTextDataGenerator is needed only when the GridMix record
// is configured to generate compressible text data.
if (compressible) {
rtg =
CompressionEmulationUtil.getRandomTextDataGenerator(ratio,
RandomTextDataGenerator.DEFAULT_SEED);
}
}
private void setSizeInternal(int size) {
this.size = Math.max(1, size);
try {
seed = maskSeed(seed, this.size);
dob.reset();
dob.writeLong(seed);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public final void setSeed(long seed) {
this.seed = seed;
}
/** Marsaglia, 2003. */
long nextRand(long x) {
x ^= (x << 13);
x ^= (x >>> 7);
return (x ^= (x << 17));
}
  /**
   * Generate random text data that can be compressed. This is used only when
   * the record is marked compressible (via {@link FileOutputFormat#COMPRESS});
   * otherwise {@link GridmixRecord#writeRandom(DataOutput, int)} is invoked.
   */
private void writeRandomText(DataOutput out, final int size)
throws IOException {
long tmp = seed;
out.writeLong(tmp);
int i = size - (Long.SIZE / Byte.SIZE);
//TODO Should we use long for size. What if the data is more than 4G?
String randomWord = rtg.getRandomWord();
byte[] bytes = randomWord.getBytes("UTF-8");
long randomWordSize = bytes.length;
while (i >= randomWordSize) {
out.write(bytes);
i -= randomWordSize;
// get the next random word
randomWord = rtg.getRandomWord();
bytes = randomWord.getBytes("UTF-8");
// determine the random word size
randomWordSize = bytes.length;
}
// pad the remaining bytes
if (i > 0) {
out.write(bytes, 0, i);
}
}
public void writeRandom(DataOutput out, final int size) throws IOException {
long tmp = seed;
out.writeLong(tmp);
int i = size - (Long.SIZE / Byte.SIZE);
while (i > Long.SIZE / Byte.SIZE - 1) {
tmp = nextRand(tmp);
out.writeLong(tmp);
i -= Long.SIZE / Byte.SIZE;
}
for (tmp = nextRand(tmp); i > 0; --i) {
out.writeByte((int)(tmp & 0xFF));
tmp >>>= Byte.SIZE;
}
}
@Override
public void readFields(DataInput in) throws IOException {
size = WritableUtils.readVInt(in);
int payload = size - WritableUtils.getVIntSize(size);
if (payload > Long.SIZE / Byte.SIZE) {
seed = in.readLong();
payload -= Long.SIZE / Byte.SIZE;
} else {
Arrays.fill(literal, (byte)0);
in.readFully(literal, 0, payload);
dib.reset(literal, 0, literal.length);
seed = dib.readLong();
payload = 0;
}
final int vBytes = in.skipBytes(payload);
if (vBytes != payload) {
throw new EOFException("Expected " + payload + ", read " + vBytes);
}
}
@Override
public void write(DataOutput out) throws IOException {
// data bytes including vint encoding
WritableUtils.writeVInt(out, size);
final int payload = size - WritableUtils.getVIntSize(size);
if (payload > Long.SIZE / Byte.SIZE) {
if (compressible) {
writeRandomText(out, payload);
} else {
writeRandom(out, payload);
}
} else if (payload > 0) {
        //TODO What if compressible is turned on? LOG is a bad idea!
out.write(literal, 0, payload);
}
}
@Override
public int compareTo(GridmixRecord other) {
return compareSeed(other.seed,
Math.max(0, other.getSize() - other.fixedBytes()));
}
int fixedBytes() {
// min vint size
return FIXED_BYTES;
}
private static long maskSeed(long sd, int sz) {
// Don't use fixedBytes here; subclasses will set intended random len
if (sz <= FIXED_BYTES) {
sd = 0L;
} else if (sz < Long.SIZE / Byte.SIZE + FIXED_BYTES) {
final int tmp = sz - FIXED_BYTES;
final long mask = (1L << (Byte.SIZE * tmp)) - 1;
sd &= mask << (Byte.SIZE * (Long.SIZE / Byte.SIZE - tmp));
}
return sd;
}
int compareSeed(long jSeed, int jSize) {
final int iSize = Math.max(0, getSize() - fixedBytes());
final int seedLen = Math.min(iSize, jSize) + FIXED_BYTES;
jSeed = maskSeed(jSeed, seedLen);
long iSeed = maskSeed(seed, seedLen);
final int cmplen = Math.min(iSize, jSize);
for (int i = 0; i < cmplen; i += Byte.SIZE) {
final int k = cmplen - i;
for (long j = Long.SIZE - Byte.SIZE;
j >= Math.max(0, Long.SIZE / Byte.SIZE - k) * Byte.SIZE;
j -= Byte.SIZE) {
final int xi = (int)((iSeed >>> j) & 0xFFL);
final int xj = (int)((jSeed >>> j) & 0xFFL);
if (xi != xj) {
return xi - xj;
}
}
iSeed = nextRand(iSeed);
jSeed = nextRand(jSeed);
}
return iSize - jSize;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other != null && other.getClass() == getClass()) {
final GridmixRecord o = ((GridmixRecord)other);
return getSize() == o.getSize() && seed == o.seed;
}
return false;
}
@Override
public int hashCode() {
return (int)(seed * getSize());
}
public static class Comparator extends WritableComparator {
public Comparator() {
super(GridmixRecord.class);
}
public Comparator(Class<? extends WritableComparable<?>> sub) {
super(sub);
}
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
int n1 = WritableUtils.decodeVIntSize(b1[s1]);
int n2 = WritableUtils.decodeVIntSize(b2[s2]);
n1 -= WritableUtils.getVIntSize(n1);
n2 -= WritableUtils.getVIntSize(n2);
return compareBytes(b1, s1+n1, l1-n1, b2, s2+n2, l2-n2);
}
static {
WritableComparator.define(GridmixRecord.class, new Comparator());
}
}
}
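
// ---------------------------------------------------------------------------
// Illustrative usage (not part of the original file): a write/readFields round
// trip over in-memory buffers. The size (100 bytes) and seed (42) are
// arbitrary example values.
// ---------------------------------------------------------------------------
class GridmixRecordRoundTripSketch {
  static boolean roundTrip() throws IOException {
    GridmixRecord original = new GridmixRecord(100, 42L);
    DataOutputBuffer out = new DataOutputBuffer();
    original.write(out);                 // vint-encoded size + random payload
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    GridmixRecord copy = new GridmixRecord();
    copy.readFields(in);                 // recovers the size and the seed
    return original.equals(copy);        // true: same size and seed
  }
}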
| 8,158 | 28.996324 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/FilePool.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.RandomAlgorithms.Selector;
/**
* Class for caching a pool of input data to be used by synthetic jobs for
* simulating read traffic.
*/
class FilePool {
public static final Log LOG = LogFactory.getLog(FilePool.class);
/**
* The minimum file size added to the pool. Default 128MiB.
*/
public static final String GRIDMIX_MIN_FILE = "gridmix.min.file.size";
  /**
   * The maximum total size of files scanned when populating the pool.
   * Defaults to 100TiB.
   */
public static final String GRIDMIX_MAX_TOTAL = "gridmix.max.total.scan";
private Node root;
private final Path path;
private final FileSystem fs;
private final Configuration conf;
private final ReadWriteLock updateLock;
/**
* Initialize a filepool under the path provided, but do not populate the
* cache.
*/
public FilePool(Configuration conf, Path input) throws IOException {
root = null;
this.conf = conf;
this.path = input;
this.fs = path.getFileSystem(conf);
updateLock = new ReentrantReadWriteLock();
}
/**
* Gather a collection of files at least as large as minSize.
* @return The total size of files returned.
*/
public long getInputFiles(long minSize, Collection<FileStatus> files)
throws IOException {
updateLock.readLock().lock();
try {
return root.selectFiles(minSize, files);
} finally {
updateLock.readLock().unlock();
}
}
/**
* (Re)generate cache of input FileStatus objects.
*/
public void refresh() throws IOException {
updateLock.writeLock().lock();
try {
root = new InnerDesc(fs, fs.getFileStatus(path),
new MinFileFilter(conf.getLong(GRIDMIX_MIN_FILE, 128 * 1024 * 1024),
conf.getLong(GRIDMIX_MAX_TOTAL, 100L * (1L << 40))));
if (0 == root.getSize()) {
throw new IOException("Found no satisfactory file in " + path);
}
} finally {
updateLock.writeLock().unlock();
}
}
/**
* Get a set of locations for the given file.
*/
public BlockLocation[] locationsFor(FileStatus stat, long start, long len)
throws IOException {
// TODO cache
return fs.getFileBlockLocations(stat, start, len);
}
static abstract class Node {
protected final static Random rand = new Random();
/**
* Total size of files and directories under the current node.
*/
abstract long getSize();
/**
* Return a set of files whose cumulative size is at least
* <tt>targetSize</tt>.
* TODO Clearly size is not the only criterion, e.g. refresh from
* generated data without including running task output, tolerance
* for permission issues, etc.
*/
abstract long selectFiles(long targetSize, Collection<FileStatus> files)
throws IOException;
}
/**
* Files in current directory of this Node.
*/
static class LeafDesc extends Node {
final long size;
final ArrayList<FileStatus> curdir;
LeafDesc(ArrayList<FileStatus> curdir, long size) {
this.size = size;
this.curdir = curdir;
}
@Override
public long getSize() {
return size;
}
@Override
public long selectFiles(long targetSize, Collection<FileStatus> files)
throws IOException {
if (targetSize >= getSize()) {
files.addAll(curdir);
return getSize();
}
Selector selector = new Selector(curdir.size(), (double) targetSize
/ getSize(), rand);
ArrayList<Integer> selected = new ArrayList<Integer>();
long ret = 0L;
do {
int index = selector.next();
selected.add(index);
ret += curdir.get(index).getLen();
} while (ret < targetSize);
for (Integer i : selected) {
files.add(curdir.get(i));
}
return ret;
}
}
/**
* A subdirectory of the current Node.
*/
static class InnerDesc extends Node {
final long size;
final double[] dist;
final Node[] subdir;
private static final Comparator<Node> nodeComparator =
new Comparator<Node>() {
public int compare(Node n1, Node n2) {
return n1.getSize() < n2.getSize() ? -1
: n1.getSize() > n2.getSize() ? 1 : 0;
}
};
InnerDesc(final FileSystem fs, FileStatus thisDir, MinFileFilter filter)
throws IOException {
long fileSum = 0L;
final ArrayList<FileStatus> curFiles = new ArrayList<FileStatus>();
final ArrayList<FileStatus> curDirs = new ArrayList<FileStatus>();
for (FileStatus stat : fs.listStatus(thisDir.getPath())) {
if (stat.isDirectory()) {
curDirs.add(stat);
} else if (filter.accept(stat)) {
curFiles.add(stat);
fileSum += stat.getLen();
}
}
ArrayList<Node> subdirList = new ArrayList<Node>();
if (!curFiles.isEmpty()) {
subdirList.add(new LeafDesc(curFiles, fileSum));
}
for (Iterator<FileStatus> i = curDirs.iterator();
!filter.done() && i.hasNext();) {
// add subdirectories
final Node d = new InnerDesc(fs, i.next(), filter);
final long dSize = d.getSize();
if (dSize > 0) {
fileSum += dSize;
subdirList.add(d);
}
}
size = fileSum;
LOG.debug(size + " bytes in " + thisDir.getPath());
subdir = subdirList.toArray(new Node[subdirList.size()]);
Arrays.sort(subdir, nodeComparator);
dist = new double[subdir.length];
for (int i = dist.length - 1; i > 0; --i) {
fileSum -= subdir[i].getSize();
dist[i] = fileSum / (1.0 * size);
}
}
@Override
public long getSize() {
return size;
}
@Override
public long selectFiles(long targetSize, Collection<FileStatus> files)
throws IOException {
long ret = 0L;
if (targetSize >= getSize()) {
// request larger than all subdirs; add everything
for (Node n : subdir) {
long added = n.selectFiles(targetSize, files);
ret += added;
targetSize -= added;
}
return ret;
}
// can satisfy request in proper subset of contents
// select random set, weighted by size
final HashSet<Node> sub = new HashSet<Node>();
do {
assert sub.size() < subdir.length;
final double r = rand.nextDouble();
int pos = Math.abs(Arrays.binarySearch(dist, r) + 1) - 1;
while (sub.contains(subdir[pos])) {
pos = (pos + 1) % subdir.length;
}
long added = subdir[pos].selectFiles(targetSize, files);
ret += added;
targetSize -= added;
sub.add(subdir[pos]);
} while (targetSize > 0);
return ret;
}
}
/**
* Filter enforcing the minFile/maxTotal parameters of the scan.
*/
private static class MinFileFilter {
private long totalScan;
private final long minFileSize;
public MinFileFilter(long minFileSize, long totalScan) {
this.minFileSize = minFileSize;
this.totalScan = totalScan;
}
public boolean done() {
return totalScan <= 0;
}
public boolean accept(FileStatus stat) {
final boolean done = done();
if (!done && stat.getLen() >= minFileSize) {
totalScan -= stat.getLen();
return true;
}
return false;
}
}
}
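
// ---------------------------------------------------------------------------
// Illustrative usage (not part of the original file): building a pool over an
// existing input directory and drawing roughly 1 GiB of files from it. The
// path and the 64 MiB minimum file size are assumptions for the example only.
// ---------------------------------------------------------------------------
class FilePoolUsageSketch {
  static long gather(Configuration conf) throws IOException {
    conf.setLong(FilePool.GRIDMIX_MIN_FILE, 64 * 1024 * 1024);
    FilePool pool = new FilePool(conf, new Path("/gridmix/input"));
    pool.refresh();   // scan the directory tree and cache FileStatus objects
    Collection<FileStatus> files = new ArrayList<FileStatus>();
    // Returns the cumulative length of the selected files (>= the request,
    // unless the pool itself is smaller).
    return pool.getInputFiles(1L << 30, files);
  }
}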
| 8,894 | 28.453642 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadSplit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.hadoop.tools.rumen.ResourceUsageMetrics;
class LoadSplit extends CombineFileSplit {
private int id;
private int nSpec;
private int maps;
private int reduces;
private long inputRecords;
private long outputBytes;
private long outputRecords;
private long maxMemory;
private double[] reduceBytes = new double[0];
private double[] reduceRecords = new double[0];
// Spec for reduces id mod this
private long[] reduceOutputBytes = new long[0];
private long[] reduceOutputRecords = new long[0];
private ResourceUsageMetrics mapMetrics;
private ResourceUsageMetrics[] reduceMetrics;
LoadSplit() {
super();
}
public LoadSplit(CombineFileSplit cfsplit, int maps, int id, long inputBytes,
long inputRecords, long outputBytes, long outputRecords,
double[] reduceBytes, double[] reduceRecords,
long[] reduceOutputBytes, long[] reduceOutputRecords,
ResourceUsageMetrics metrics,
ResourceUsageMetrics[] rMetrics)
throws IOException {
super(cfsplit);
this.id = id;
this.maps = maps;
reduces = reduceBytes.length;
this.inputRecords = inputRecords;
this.outputBytes = outputBytes;
this.outputRecords = outputRecords;
this.reduceBytes = reduceBytes;
this.reduceRecords = reduceRecords;
nSpec = reduceOutputBytes.length;
this.reduceOutputBytes = reduceOutputBytes;
this.reduceOutputRecords = reduceOutputRecords;
this.mapMetrics = metrics;
this.reduceMetrics = rMetrics;
}
public int getId() {
return id;
}
public int getMapCount() {
return maps;
}
public long getInputRecords() {
return inputRecords;
}
public long[] getOutputBytes() {
if (0 == reduces) {
return new long[] { outputBytes };
}
final long[] ret = new long[reduces];
for (int i = 0; i < reduces; ++i) {
ret[i] = Math.round(outputBytes * reduceBytes[i]);
}
return ret;
}
public long[] getOutputRecords() {
if (0 == reduces) {
return new long[] { outputRecords };
}
final long[] ret = new long[reduces];
for (int i = 0; i < reduces; ++i) {
ret[i] = Math.round(outputRecords * reduceRecords[i]);
}
return ret;
}
public long getReduceBytes(int i) {
return reduceOutputBytes[i];
}
public long getReduceRecords(int i) {
return reduceOutputRecords[i];
}
public ResourceUsageMetrics getMapResourceUsageMetrics() {
return mapMetrics;
}
public ResourceUsageMetrics getReduceResourceUsageMetrics(int i) {
return reduceMetrics[i];
}
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
WritableUtils.writeVInt(out, id);
WritableUtils.writeVInt(out, maps);
WritableUtils.writeVLong(out, inputRecords);
WritableUtils.writeVLong(out, outputBytes);
WritableUtils.writeVLong(out, outputRecords);
WritableUtils.writeVLong(out, maxMemory);
WritableUtils.writeVInt(out, reduces);
for (int i = 0; i < reduces; ++i) {
out.writeDouble(reduceBytes[i]);
out.writeDouble(reduceRecords[i]);
}
WritableUtils.writeVInt(out, nSpec);
for (int i = 0; i < nSpec; ++i) {
WritableUtils.writeVLong(out, reduceOutputBytes[i]);
WritableUtils.writeVLong(out, reduceOutputRecords[i]);
}
mapMetrics.write(out);
int numReduceMetrics = (reduceMetrics == null) ? 0 : reduceMetrics.length;
WritableUtils.writeVInt(out, numReduceMetrics);
for (int i = 0; i < numReduceMetrics; ++i) {
reduceMetrics[i].write(out);
}
}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
id = WritableUtils.readVInt(in);
maps = WritableUtils.readVInt(in);
inputRecords = WritableUtils.readVLong(in);
outputBytes = WritableUtils.readVLong(in);
outputRecords = WritableUtils.readVLong(in);
maxMemory = WritableUtils.readVLong(in);
reduces = WritableUtils.readVInt(in);
if (reduceBytes.length < reduces) {
reduceBytes = new double[reduces];
reduceRecords = new double[reduces];
}
for (int i = 0; i < reduces; ++i) {
reduceBytes[i] = in.readDouble();
reduceRecords[i] = in.readDouble();
}
nSpec = WritableUtils.readVInt(in);
if (reduceOutputBytes.length < nSpec) {
reduceOutputBytes = new long[nSpec];
reduceOutputRecords = new long[nSpec];
}
for (int i = 0; i < nSpec; ++i) {
reduceOutputBytes[i] = WritableUtils.readVLong(in);
reduceOutputRecords[i] = WritableUtils.readVLong(in);
}
mapMetrics = new ResourceUsageMetrics();
mapMetrics.readFields(in);
int numReduceMetrics = WritableUtils.readVInt(in);
reduceMetrics = new ResourceUsageMetrics[numReduceMetrics];
for (int i = 0; i < numReduceMetrics; ++i) {
reduceMetrics[i] = new ResourceUsageMetrics();
reduceMetrics[i].readFields(in);
}
}
}
| 6,009 | 32.20442 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixSplit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
class GridmixSplit extends CombineFileSplit {
private int id;
private int nSpec;
private int maps;
private int reduces;
private long inputRecords;
private long outputBytes;
private long outputRecords;
private long maxMemory;
private double[] reduceBytes = new double[0];
private double[] reduceRecords = new double[0];
// Spec for reduces id mod this
private long[] reduceOutputBytes = new long[0];
private long[] reduceOutputRecords = new long[0];
GridmixSplit() {
super();
}
public GridmixSplit(CombineFileSplit cfsplit, int maps, int id,
long inputBytes, long inputRecords, long outputBytes,
long outputRecords, double[] reduceBytes, double[] reduceRecords,
long[] reduceOutputBytes, long[] reduceOutputRecords)
throws IOException {
super(cfsplit);
this.id = id;
this.maps = maps;
reduces = reduceBytes.length;
this.inputRecords = inputRecords;
this.outputBytes = outputBytes;
this.outputRecords = outputRecords;
this.reduceBytes = reduceBytes;
this.reduceRecords = reduceRecords;
nSpec = reduceOutputBytes.length;
this.reduceOutputBytes = reduceOutputBytes;
this.reduceOutputRecords = reduceOutputRecords;
}
public int getId() {
return id;
}
public int getMapCount() {
return maps;
}
public long getInputRecords() {
return inputRecords;
}
public long[] getOutputBytes() {
if (0 == reduces) {
return new long[] { outputBytes };
}
final long[] ret = new long[reduces];
for (int i = 0; i < reduces; ++i) {
ret[i] = Math.round(outputBytes * reduceBytes[i]);
}
return ret;
}
public long[] getOutputRecords() {
if (0 == reduces) {
return new long[] { outputRecords };
}
final long[] ret = new long[reduces];
for (int i = 0; i < reduces; ++i) {
ret[i] = Math.round(outputRecords * reduceRecords[i]);
}
return ret;
}
public long getReduceBytes(int i) {
return reduceOutputBytes[i];
}
public long getReduceRecords(int i) {
return reduceOutputRecords[i];
}
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
WritableUtils.writeVInt(out, id);
WritableUtils.writeVInt(out, maps);
WritableUtils.writeVLong(out, inputRecords);
WritableUtils.writeVLong(out, outputBytes);
WritableUtils.writeVLong(out, outputRecords);
WritableUtils.writeVLong(out, maxMemory);
WritableUtils.writeVInt(out, reduces);
for (int i = 0; i < reduces; ++i) {
out.writeDouble(reduceBytes[i]);
out.writeDouble(reduceRecords[i]);
}
WritableUtils.writeVInt(out, nSpec);
for (int i = 0; i < nSpec; ++i) {
WritableUtils.writeVLong(out, reduceOutputBytes[i]);
WritableUtils.writeVLong(out, reduceOutputRecords[i]);
}
}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
id = WritableUtils.readVInt(in);
maps = WritableUtils.readVInt(in);
inputRecords = WritableUtils.readVLong(in);
outputBytes = WritableUtils.readVLong(in);
outputRecords = WritableUtils.readVLong(in);
maxMemory = WritableUtils.readVLong(in);
reduces = WritableUtils.readVInt(in);
if (reduceBytes.length < reduces) {
reduceBytes = new double[reduces];
reduceRecords = new double[reduces];
}
for (int i = 0; i < reduces; ++i) {
reduceBytes[i] = in.readDouble();
reduceRecords[i] = in.readDouble();
}
nSpec = WritableUtils.readVInt(in);
if (reduceOutputBytes.length < nSpec) {
reduceOutputBytes = new long[nSpec];
reduceOutputRecords = new long[nSpec];
}
for (int i = 0; i < nSpec; ++i) {
reduceOutputBytes[i] = WritableUtils.readVLong(in);
reduceOutputRecords[i] = WritableUtils.readVLong(in);
}
}
}
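
// ---------------------------------------------------------------------------
// Illustrative usage (not part of the original file): building a split over a
// single-file CombineFileSplit and reading back the per-reduce output spec.
// Every path and count below is an arbitrary example value.
// ---------------------------------------------------------------------------
class GridmixSplitUsageSketch {
  static long[] perReduceOutputBytes() throws IOException {
    CombineFileSplit cfsplit = new CombineFileSplit(
        new org.apache.hadoop.fs.Path[] {
            new org.apache.hadoop.fs.Path("file:///tmp/gridmix/part-00000") },
        new long[] { 0L }, new long[] { 1024L }, new String[0]);
    // Two reduces, each expected to receive half of the 2048 output bytes.
    GridmixSplit split = new GridmixSplit(cfsplit, 4, 0,
        1024L, 10L, 2048L, 20L,
        new double[] { 0.5, 0.5 }, new double[] { 0.5, 0.5 },
        new long[] { 1024L }, new long[] { 10L });
    return split.getOutputBytes();   // => { 1024, 1024 }
  }
}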
| 4,872 | 31.704698 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import java.nio.charset.Charset;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
import org.apache.hadoop.security.UserGroupInformation;
/**
* GridmixJob that generates distributed cache files.
* {@link GenerateDistCacheData} expects a list of distributed cache files to be
* generated as input. This list is expected to be stored as a sequence file
* and the filename is expected to be configured using
* {@code gridmix.distcache.file.list}.
* This input file contains the list of distributed cache files and their sizes.
* For each record (i.e. file size and file path) in this input file,
* a file with the specific file size at the specific path is created.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
class GenerateDistCacheData extends GridmixJob {
/**
* Number of distributed cache files to be created by gridmix
*/
static final String GRIDMIX_DISTCACHE_FILE_COUNT =
"gridmix.distcache.file.count";
/**
* Total number of bytes to be written to the distributed cache files by
* gridmix. i.e. Sum of sizes of all unique distributed cache files to be
* created by gridmix.
*/
static final String GRIDMIX_DISTCACHE_BYTE_COUNT =
"gridmix.distcache.byte.count";
/**
   * The special file created (and used) by gridmix that contains the list of
* unique distributed cache files that are to be created and their sizes.
*/
static final String GRIDMIX_DISTCACHE_FILE_LIST =
"gridmix.distcache.file.list";
static final String JOB_NAME = "GRIDMIX_GENERATE_DISTCACHE_DATA";
/**
   * Create distributed cache files with permission 0644.
   * Since the private distributed cache directory doesn't have execute
   * permission for others, it is OK to set read permission for others on
   * the files under that directory; they will still be treated as 'private'
   * distributed cache files on the simulated cluster.
*/
static final short GRIDMIX_DISTCACHE_FILE_PERM = 0644;
private static final Charset charsetUTF8 = Charset.forName("UTF-8");
public GenerateDistCacheData(Configuration conf) throws IOException {
super(conf, 0L, JOB_NAME);
}
@Override
public Job call() throws IOException, InterruptedException,
ClassNotFoundException {
UserGroupInformation ugi = UserGroupInformation.getLoginUser();
ugi.doAs( new PrivilegedExceptionAction <Job>() {
public Job run() throws IOException, ClassNotFoundException,
InterruptedException {
job.setMapperClass(GenDCDataMapper.class);
job.setNumReduceTasks(0);
job.setMapOutputKeyClass(NullWritable.class);
job.setMapOutputValueClass(BytesWritable.class);
job.setInputFormatClass(GenDCDataFormat.class);
job.setOutputFormatClass(NullOutputFormat.class);
job.setJarByClass(GenerateDistCacheData.class);
try {
FileInputFormat.addInputPath(job, new Path("ignored"));
} catch (IOException e) {
LOG.error("Error while adding input path ", e);
}
job.submit();
return job;
}
});
return job;
}
@Override
protected boolean canEmulateCompression() {
return false;
}
public static class GenDCDataMapper
extends Mapper<LongWritable, BytesWritable, NullWritable, BytesWritable> {
private BytesWritable val;
private final Random r = new Random();
private FileSystem fs;
@Override
protected void setup(Context context)
throws IOException, InterruptedException {
val = new BytesWritable(new byte[context.getConfiguration().getInt(
GenerateData.GRIDMIX_VAL_BYTES, 1024 * 1024)]);
fs = FileSystem.get(context.getConfiguration());
}
// Create one distributed cache file with the needed file size.
// key is distributed cache file size and
// value is distributed cache file path.
@Override
public void map(LongWritable key, BytesWritable value, Context context)
throws IOException, InterruptedException {
String fileName = new String(value.getBytes(), 0,
value.getLength(), charsetUTF8);
Path path = new Path(fileName);
FSDataOutputStream dos =
FileSystem.create(fs, path, new FsPermission(GRIDMIX_DISTCACHE_FILE_PERM));
int size = 0;
for (long bytes = key.get(); bytes > 0; bytes -= size) {
r.nextBytes(val.getBytes());
size = (int)Math.min(val.getLength(), bytes);
dos.write(val.getBytes(), 0, size);// Write to distCache file
}
dos.close();
}
}
/**
* InputFormat for GenerateDistCacheData.
* Input to GenerateDistCacheData is the special file(in SequenceFile format)
* that contains the list of distributed cache files to be generated along
* with their file sizes.
*/
static class GenDCDataFormat
extends InputFormat<LongWritable, BytesWritable> {
    // Split the special file that contains the list of distributed cache file
    // paths and their file sizes such that each split corresponds to
    // approximately the same amount of distributed cache data to be generated.
    // Consider numTaskTrackers * numMapSlotsPerTracker as the number of maps
    // for this job, if there is a lot of data to be generated.
@Override
public List<InputSplit> getSplits(JobContext jobCtxt) throws IOException {
final JobConf jobConf = new JobConf(jobCtxt.getConfiguration());
final JobClient client = new JobClient(jobConf);
ClusterStatus stat = client.getClusterStatus(true);
int numTrackers = stat.getTaskTrackers();
final int fileCount = jobConf.getInt(GRIDMIX_DISTCACHE_FILE_COUNT, -1);
// Total size of distributed cache files to be generated
final long totalSize = jobConf.getLong(GRIDMIX_DISTCACHE_BYTE_COUNT, -1);
// Get the path of the special file
String distCacheFileList = jobConf.get(GRIDMIX_DISTCACHE_FILE_LIST);
if (fileCount < 0 || totalSize < 0 || distCacheFileList == null) {
throw new RuntimeException("Invalid metadata: #files (" + fileCount
+ "), total_size (" + totalSize + "), filelisturi ("
+ distCacheFileList + ")");
}
Path sequenceFile = new Path(distCacheFileList);
FileSystem fs = sequenceFile.getFileSystem(jobConf);
FileStatus srcst = fs.getFileStatus(sequenceFile);
// Consider the number of TTs * mapSlotsPerTracker as number of mappers.
int numMapSlotsPerTracker = jobConf.getInt(TTConfig.TT_MAP_SLOTS, 2);
int numSplits = numTrackers * numMapSlotsPerTracker;
List<InputSplit> splits = new ArrayList<InputSplit>(numSplits);
LongWritable key = new LongWritable();
BytesWritable value = new BytesWritable();
// Average size of data to be generated by each map task
final long targetSize = Math.max(totalSize / numSplits,
DistributedCacheEmulator.AVG_BYTES_PER_MAP);
long splitStartPosition = 0L;
long splitEndPosition = 0L;
long acc = 0L;
long bytesRemaining = srcst.getLen();
SequenceFile.Reader reader = null;
try {
reader = new SequenceFile.Reader(fs, sequenceFile, jobConf);
while (reader.next(key, value)) {
// If adding this file would put this split past the target size,
// cut the last split and put this file in the next split.
if (acc + key.get() > targetSize && acc != 0) {
long splitSize = splitEndPosition - splitStartPosition;
splits.add(new FileSplit(
sequenceFile, splitStartPosition, splitSize, (String[])null));
bytesRemaining -= splitSize;
splitStartPosition = splitEndPosition;
acc = 0L;
}
acc += key.get();
splitEndPosition = reader.getPosition();
}
} finally {
if (reader != null) {
reader.close();
}
}
if (bytesRemaining != 0) {
splits.add(new FileSplit(
sequenceFile, splitStartPosition, bytesRemaining, (String[])null));
}
return splits;
}
/**
* Returns a reader for this split of the distributed cache file list.
*/
@Override
public RecordReader<LongWritable, BytesWritable> createRecordReader(
InputSplit split, final TaskAttemptContext taskContext)
throws IOException, InterruptedException {
return new SequenceFileRecordReader<LongWritable, BytesWritable>();
}
}
}
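
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the record layout of
// the special SequenceFile named by gridmix.distcache.file.list, as consumed
// by GenDCDataFormat/GenDCDataMapper above. In a real run Gridmix writes this
// file itself; the path and size below are assumptions for the example only.
// ---------------------------------------------------------------------------
class DistCacheFileListSketch {
  static void writeListing(Configuration conf, Path listing) throws IOException {
    FileSystem fs = listing.getFileSystem(conf);
    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, listing,
        LongWritable.class, BytesWritable.class);
    try {
      // One record per distributed cache file: key = size, value = UTF-8 path.
      byte[] path = "/gridmix/distributedCache/file-0".getBytes("UTF-8");
      writer.append(new LongWritable(16 * 1024 * 1024), new BytesWritable(path));
    } finally {
      writer.close();
    }
  }
}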
| 10,795 | 39.434457 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RandomAlgorithms.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
/**
* Random algorithms.
*/
public class RandomAlgorithms {
private interface IndexMapper {
int get(int pos);
void swap(int a, int b);
int getSize();
void reset();
}
/**
* A sparse index mapping table - useful when we want to
* non-destructively permute a small fraction of a large array.
*/
private static class SparseIndexMapper implements IndexMapper {
Map<Integer, Integer> mapping = new HashMap<Integer, Integer>();
int size;
SparseIndexMapper(int size) {
this.size = size;
}
public int get(int pos) {
Integer mapped = mapping.get(pos);
if (mapped == null) return pos;
return mapped;
}
public void swap(int a, int b) {
if (a == b) return;
int valA = get(a);
int valB = get(b);
if (b == valA) {
mapping.remove(b);
} else {
mapping.put(b, valA);
}
if (a == valB) {
mapping.remove(a);
} else {
mapping.put(a, valB);
}
}
public int getSize() {
return size;
}
public void reset() {
mapping.clear();
}
}
/**
* A dense index mapping table - useful when we want to
* non-destructively permute a large fraction of an array.
*/
private static class DenseIndexMapper implements IndexMapper {
int[] mapping;
DenseIndexMapper(int size) {
mapping = new int[size];
for (int i=0; i<size; ++i) {
mapping[i] = i;
}
}
public int get(int pos) {
if ( (pos < 0) || (pos>=mapping.length) ) {
throw new IndexOutOfBoundsException();
}
return mapping[pos];
}
public void swap(int a, int b) {
if (a == b) return;
int valA = get(a);
int valB = get(b);
mapping[a]=valB;
mapping[b]=valA;
}
public int getSize() {
return mapping.length;
}
public void reset() {
return;
}
}
/**
* Iteratively pick random numbers from pool 0..n-1. Each number can only be
* picked once.
*/
public static class Selector {
private IndexMapper mapping;
private int n;
private Random rand;
/**
* Constructor.
*
* @param n
* The pool of integers: 0..n-1.
* @param selPcnt
     * Expected fraction of the pool that will be selected. This is only a
     * hint used for internal memory optimization.
* @param rand
* Random number generator.
*/
public Selector(int n, double selPcnt, Random rand) {
if (n <= 0) {
throw new IllegalArgumentException("n should be positive");
}
boolean sparse = (n > 200) && (selPcnt < 0.1);
this.n = n;
mapping = (sparse) ? new SparseIndexMapper(n) : new DenseIndexMapper(n);
this.rand = rand;
}
/**
* Select the next random number.
* @return Random number selected. Or -1 if the remaining pool is empty.
*/
public int next() {
switch (n) {
case 0: return -1;
case 1:
{
int index = mapping.get(0);
--n;
return index;
}
default:
{
int pos = rand.nextInt(n);
int index = mapping.get(pos);
mapping.swap(pos, --n);
return index;
}
}
}
/**
* Get the remaining random number pool size.
*/
public int getPoolSize() {
return n;
}
/**
     * Reset the selector for reuse.
*/
public void reset() {
mapping.reset();
n = mapping.getSize();
}
}
/**
   * Select m random integers from 0..n-1.
* @return An array of selected integers.
*/
public static int[] select(int m, int n, Random rand) {
if (m >= n) {
int[] ret = new int[n];
for (int i=0; i<n; ++i) {
ret[i] = i;
}
return ret;
}
Selector selector = new Selector(n, (float)m/n, rand);
int[] selected = new int[m];
for (int i=0; i<m; ++i) {
selected[i] = selector.next();
}
return selected;
}
}
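
// ---------------------------------------------------------------------------
// Illustrative usage (not part of the original file): drawing distinct values
// from a pool of 100 integers. The pool size, selection fraction and seed are
// arbitrary example values.
// ---------------------------------------------------------------------------
class RandomAlgorithmsUsageSketch {
  public static void main(String[] args) {
    Random rand = new Random(42L);
    // One-shot selection of 5 distinct integers from 0..99.
    int[] picked = RandomAlgorithms.select(5, 100, rand);
    System.out.println(java.util.Arrays.toString(picked));
    // Incremental selection: next() yields each value at most once and
    // returns -1 once the pool is exhausted.
    RandomAlgorithms.Selector selector =
        new RandomAlgorithms.Selector(100, 0.05, rand);
    for (int v = selector.next(); v != -1; v = selector.next()) {
      // consume v ...
    }
    selector.reset();   // restore the full pool for reuse
  }
}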
| 4,991 | 22.771429 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapred.gridmix.GenerateData.DataStatistics;
import org.apache.hadoop.mapred.gridmix.Statistics.JobStats;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.tools.rumen.JobStoryProducer;
import org.apache.hadoop.tools.rumen.ZombieJobProducer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* Driver class for the Gridmix3 benchmark. Gridmix accepts a timestamped
* stream (trace) of job/task descriptions. For each job in the trace, the
* client will submit a corresponding, synthetic job to the target cluster at
* the rate in the original trace. The intent is to provide a benchmark that
* can be configured and extended to closely match the measured resource
* profile of actual, production loads.
*/
public class Gridmix extends Configured implements Tool {
public static final Log LOG = LogFactory.getLog(Gridmix.class);
/**
* Output (scratch) directory for submitted jobs. Relative paths are
* resolved against the path provided as input and absolute paths remain
* independent of it. The default is "gridmix".
*/
public static final String GRIDMIX_OUT_DIR = "gridmix.output.directory";
/**
* Number of submitting threads at the client and upper bound for
* in-memory split data. Submitting threads precompute InputSplits for
* submitted jobs. This limits the number of splits held in memory waiting
* for submission and also permits parallel computation of split data.
*/
public static final String GRIDMIX_SUB_THR = "gridmix.client.submit.threads";
/**
* The depth of the queue of job descriptions. Before splits are computed,
   * a queue of pending descriptions is stored in memory. This parameter
* limits the depth of that queue.
*/
public static final String GRIDMIX_QUE_DEP =
"gridmix.client.pending.queue.depth";
/**
* Multiplier to accelerate or decelerate job submission. As a crude means of
* sizing a job trace to a cluster, the time separating two jobs is
* multiplied by this factor.
*/
public static final String GRIDMIX_SUB_MUL = "gridmix.submit.multiplier";
/**
* Class used to resolve users in the trace to the list of target users
* on the cluster.
*/
public static final String GRIDMIX_USR_RSV = "gridmix.user.resolve.class";
/**
* The configuration key which determines the duration for which the
* job-monitor sleeps while polling for job status.
* This value should be specified in milliseconds.
*/
public static final String GRIDMIX_JOBMONITOR_SLEEPTIME_MILLIS =
"gridmix.job-monitor.sleep-time-ms";
/**
* Default value for {@link #GRIDMIX_JOBMONITOR_SLEEPTIME_MILLIS}.
*/
public static final int GRIDMIX_JOBMONITOR_SLEEPTIME_MILLIS_DEFAULT = 500;
/**
* The configuration key which determines the total number of job-status
* monitoring threads.
*/
public static final String GRIDMIX_JOBMONITOR_THREADS =
"gridmix.job-monitor.thread-count";
/**
* Default value for {@link #GRIDMIX_JOBMONITOR_THREADS}.
*/
public static final int GRIDMIX_JOBMONITOR_THREADS_DEFAULT = 1;
/**
* Configuration property set in simulated job's configuration whose value is
* set to the corresponding original job's name. This is not configurable by
* gridmix user.
*/
public static final String ORIGINAL_JOB_NAME =
"gridmix.job.original-job-name";
/**
* Configuration property set in simulated job's configuration whose value is
* set to the corresponding original job's id. This is not configurable by
* gridmix user.
*/
public static final String ORIGINAL_JOB_ID = "gridmix.job.original-job-id";
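  // -------------------------------------------------------------------------
  // Illustrative configuration sketch (not part of the original file): how a
  // caller might tune the client-side knobs documented above before invoking
  // Gridmix through ToolRunner. The values are arbitrary examples, not
  // defaults.
  //
  //   Configuration conf = new Configuration();
  //   conf.setInt(GRIDMIX_SUB_THR, 4);       // submitting threads
  //   conf.setInt(GRIDMIX_QUE_DEP, 10);      // pending job-description queue
  //   conf.setFloat(GRIDMIX_SUB_MUL, 0.5f);  // halve inter-job gaps
  //   conf.set(GRIDMIX_USR_RSV, SubmitterUserResolver.class.getName());
  //   int exitCode = ToolRunner.run(conf, new Gridmix(), args);
  // -------------------------------------------------------------------------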
private DistributedCacheEmulator distCacheEmulator;
// Submit data structures
@SuppressWarnings("rawtypes")
private JobFactory factory;
private JobSubmitter submitter;
private JobMonitor monitor;
private Statistics statistics;
private Summarizer summarizer;
// Shutdown hook
private final Shutdown sdh = new Shutdown();
/** Error while parsing/analyzing the arguments to Gridmix */
static final int ARGS_ERROR = 1;
/** Error while trying to start/setup the Gridmix run */
static final int STARTUP_FAILED_ERROR = 2;
/**
* If at least 1 distributed cache file is missing in the expected
* distributed cache dir, Gridmix cannot proceed with emulation of
* distributed cache load.
*/
static final int MISSING_DIST_CACHE_FILES_ERROR = 3;
Gridmix(String[] args) {
summarizer = new Summarizer(args);
}
public Gridmix() {
summarizer = new Summarizer();
}
// Get the input data directory for Gridmix. Input directory is
// <io-path>/input
static Path getGridmixInputDataPath(Path ioPath) {
return new Path(ioPath, "input");
}
/**
* Write random bytes at the path <inputDir> if needed.
* @see org.apache.hadoop.mapred.gridmix.GenerateData
* @return exit status
*/
@SuppressWarnings("deprecation")
protected int writeInputData(long genbytes, Path inputDir)
throws IOException, InterruptedException {
if (genbytes > 0) {
final Configuration conf = getConf();
if (inputDir.getFileSystem(conf).exists(inputDir)) {
LOG.error("Gridmix input data directory " + inputDir
+ " already exists when -generate option is used.\n");
return STARTUP_FAILED_ERROR;
}
// configure the compression ratio if needed
CompressionEmulationUtil.setupDataGeneratorConfig(conf);
final GenerateData genData = new GenerateData(conf, inputDir, genbytes);
LOG.info("Generating " + StringUtils.humanReadableInt(genbytes) +
" of test data...");
launchGridmixJob(genData);
FsShell shell = new FsShell(conf);
try {
LOG.info("Changing the permissions for inputPath " + inputDir.toString());
shell.run(new String[] {"-chmod","-R","777", inputDir.toString()});
} catch (Exception e) {
LOG.error("Couldnt change the file permissions " , e);
throw new IOException(e);
}
LOG.info("Input data generation successful.");
}
return 0;
}
/**
* Write random bytes in the distributed cache files that will be used by all
* simulated jobs of current gridmix run, if files are to be generated.
* Do this as part of the MapReduce job {@link GenerateDistCacheData#JOB_NAME}
* @see org.apache.hadoop.mapred.gridmix.GenerateDistCacheData
*/
protected void writeDistCacheData(Configuration conf)
throws IOException, InterruptedException {
int fileCount =
conf.getInt(GenerateDistCacheData.GRIDMIX_DISTCACHE_FILE_COUNT, -1);
if (fileCount > 0) {// generate distributed cache files
final GridmixJob genDistCacheData = new GenerateDistCacheData(conf);
LOG.info("Generating distributed cache data of size " + conf.getLong(
GenerateDistCacheData.GRIDMIX_DISTCACHE_BYTE_COUNT, -1));
launchGridmixJob(genDistCacheData);
}
}
// Launch Input/DistCache Data Generation job and wait for completion
void launchGridmixJob(GridmixJob job)
throws IOException, InterruptedException {
submitter.add(job);
// TODO add listeners, use for job dependencies
try {
while (!job.isSubmitted()) {
try {
Thread.sleep(100); // sleep
} catch (InterruptedException ie) {}
}
// wait for completion
job.getJob().waitForCompletion(false);
} catch (ClassNotFoundException e) {
throw new IOException("Internal error", e);
}
if (!job.getJob().isSuccessful()) {
throw new IOException(job.getJob().getJobName() + " job failed!");
}
}
/**
* Create an appropriate {@code JobStoryProducer} object for the
* given trace.
*
* @param traceIn the path to the trace file. The special path
* "-" denotes the standard input stream.
*
* @param conf the configuration to be used.
*
* @throws IOException if there was an error.
*/
protected JobStoryProducer createJobStoryProducer(String traceIn,
Configuration conf) throws IOException {
if ("-".equals(traceIn)) {
return new ZombieJobProducer(System.in, null);
}
return new ZombieJobProducer(new Path(traceIn), null, conf);
}
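  // Usage sketch: createJobStoryProducer("-", conf) streams the Rumen trace
  // from stdin, while createJobStoryProducer("/traces/job-trace.json", conf)
  // reads it from the given (hypothetical) file path.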
// get the gridmix job submission policy
protected static GridmixJobSubmissionPolicy getJobSubmissionPolicy(
Configuration conf) {
return GridmixJobSubmissionPolicy.getPolicy(conf,
GridmixJobSubmissionPolicy.STRESS);
}
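  // Illustrative only: the submission policy can be picked via configuration,
  // e.g. conf.set(GridmixJobSubmissionPolicy.JOB_SUBMISSION_POLICY, "SERIAL");
  // when nothing is set, STRESS is used as the default (see above).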
/**
* Create each component in the pipeline and start it.
* @param conf Configuration data, no keys specific to this context
* @param traceIn Either a Path to the trace data or "-" for
* stdin
* @param ioPath <ioPath>/input/ is the dir from which input data is
* read and <ioPath>/distributedCache/ is the gridmix
* distributed cache directory.
* @param scratchDir Path into which job output is written
* @param startFlag Semaphore for starting job trace pipeline
*/
@SuppressWarnings("unchecked")
private void startThreads(Configuration conf, String traceIn, Path ioPath,
Path scratchDir, CountDownLatch startFlag, UserResolver userResolver)
throws IOException {
try {
Path inputDir = getGridmixInputDataPath(ioPath);
GridmixJobSubmissionPolicy policy = getJobSubmissionPolicy(conf);
LOG.info(" Submission policy is " + policy.name());
statistics = new Statistics(conf, policy.getPollingInterval(), startFlag);
monitor = createJobMonitor(statistics, conf);
int noOfSubmitterThreads =
(policy == GridmixJobSubmissionPolicy.SERIAL)
? 1
: Runtime.getRuntime().availableProcessors() + 1;
int numThreads = conf.getInt(GRIDMIX_SUB_THR, noOfSubmitterThreads);
int queueDep = conf.getInt(GRIDMIX_QUE_DEP, 5);
submitter = createJobSubmitter(monitor, numThreads, queueDep,
new FilePool(conf, inputDir), userResolver,
statistics);
distCacheEmulator = new DistributedCacheEmulator(conf, ioPath);
factory = createJobFactory(submitter, traceIn, scratchDir, conf,
startFlag, userResolver);
factory.jobCreator.setDistCacheEmulator(distCacheEmulator);
if (policy == GridmixJobSubmissionPolicy.SERIAL) {
statistics.addJobStatsListeners(factory);
} else {
statistics.addClusterStatsObservers(factory);
}
// add the gridmix run summarizer to the statistics
statistics.addJobStatsListeners(summarizer.getExecutionSummarizer());
statistics.addClusterStatsObservers(summarizer.getClusterSummarizer());
monitor.start();
submitter.start();
    } catch (Exception e) {
      LOG.error("Exception at start", e);
throw new IOException(e);
}
}
protected JobMonitor createJobMonitor(Statistics stats, Configuration conf)
throws IOException {
int delay = conf.getInt(GRIDMIX_JOBMONITOR_SLEEPTIME_MILLIS,
GRIDMIX_JOBMONITOR_SLEEPTIME_MILLIS_DEFAULT);
int numThreads = conf.getInt(GRIDMIX_JOBMONITOR_THREADS,
GRIDMIX_JOBMONITOR_THREADS_DEFAULT);
return new JobMonitor(delay, TimeUnit.MILLISECONDS, stats, numThreads);
}
protected JobSubmitter createJobSubmitter(JobMonitor monitor, int threads,
int queueDepth, FilePool pool, UserResolver resolver,
Statistics statistics) throws IOException {
return new JobSubmitter(monitor, threads, queueDepth, pool, statistics);
}
@SuppressWarnings("rawtypes")
protected JobFactory createJobFactory(JobSubmitter submitter, String traceIn,
Path scratchDir, Configuration conf, CountDownLatch startFlag,
UserResolver resolver)
throws IOException {
return GridmixJobSubmissionPolicy.getPolicy(
conf, GridmixJobSubmissionPolicy.STRESS).createJobFactory(
submitter, createJobStoryProducer(traceIn, conf), scratchDir, conf,
startFlag, resolver);
}
private static UserResolver userResolver;
public UserResolver getCurrentUserResolver() {
return userResolver;
}
public int run(final String[] argv) throws IOException, InterruptedException {
int val = -1;
final Configuration conf = getConf();
UserGroupInformation.setConfiguration(conf);
UserGroupInformation ugi = UserGroupInformation.getLoginUser();
val = ugi.doAs(new PrivilegedExceptionAction<Integer>() {
public Integer run() throws Exception {
return runJob(conf, argv);
}
});
// print the gridmix summary if the run was successful
if (val == 0) {
// print the run summary
System.out.print("\n\n");
System.out.println(summarizer.toString());
}
return val;
}
@SuppressWarnings("deprecation")
private int runJob(Configuration conf, String[] argv)
throws IOException, InterruptedException {
if (argv.length < 2) {
LOG.error("Too few arguments to Gridmix.\n");
printUsage(System.err);
return ARGS_ERROR;
}
long genbytes = -1L;
String traceIn = null;
Path ioPath = null;
URI userRsrc = null;
try {
userResolver = ReflectionUtils.newInstance(conf.getClass(GRIDMIX_USR_RSV,
SubmitterUserResolver.class, UserResolver.class), conf);
for (int i = 0; i < argv.length - 2; ++i) {
if ("-generate".equals(argv[i])) {
genbytes = StringUtils.TraditionalBinaryPrefix.string2long(argv[++i]);
if (genbytes <= 0) {
LOG.error("size of input data to be generated specified using "
+ "-generate option should be nonnegative.\n");
return ARGS_ERROR;
}
} else if ("-users".equals(argv[i])) {
userRsrc = new URI(argv[++i]);
} else {
LOG.error("Unknown option " + argv[i] + " specified.\n");
printUsage(System.err);
return ARGS_ERROR;
}
}
if (userResolver.needsTargetUsersList()) {
if (userRsrc != null) {
if (!userResolver.setTargetUsers(userRsrc, conf)) {
LOG.warn("Ignoring the user resource '" + userRsrc + "'.");
}
} else {
LOG.error(userResolver.getClass()
+ " needs target user list. Use -users option.\n");
printUsage(System.err);
return ARGS_ERROR;
}
} else if (userRsrc != null) {
LOG.warn("Ignoring the user resource '" + userRsrc + "'.");
}
ioPath = new Path(argv[argv.length - 2]);
traceIn = argv[argv.length - 1];
} catch (Exception e) {
LOG.error(e.toString() + "\n");
if (LOG.isDebugEnabled()) {
e.printStackTrace();
}
printUsage(System.err);
return ARGS_ERROR;
}
// Create <ioPath> with 777 permissions
final FileSystem inputFs = ioPath.getFileSystem(conf);
ioPath = ioPath.makeQualified(inputFs);
boolean succeeded = false;
try {
succeeded = FileSystem.mkdirs(inputFs, ioPath,
new FsPermission((short)0777));
} catch(IOException e) {
// No need to emit this exception message
} finally {
if (!succeeded) {
LOG.error("Failed creation of <ioPath> directory " + ioPath + "\n");
return STARTUP_FAILED_ERROR;
}
}
return start(conf, traceIn, ioPath, genbytes, userResolver);
}
  /**
   * Start the Gridmix run: launch the pipeline components, optionally
   * generate the input and distributed cache data, and wait for the
   * simulated jobs to complete.
   *
* @param conf gridmix configuration
* @param traceIn trace file path(if it is '-', then trace comes from the
* stream stdin)
* @param ioPath Working directory for gridmix. GenerateData job
* will generate data in the directory <ioPath>/input/ and
* distributed cache data is generated in the directory
* <ioPath>/distributedCache/, if -generate option is
* specified.
* @param genbytes size of input data to be generated under the directory
* <ioPath>/input/
* @param userResolver gridmix user resolver
* @return exit code
* @throws IOException
* @throws InterruptedException
*/
int start(Configuration conf, String traceIn, Path ioPath, long genbytes,
UserResolver userResolver)
throws IOException, InterruptedException {
DataStatistics stats = null;
InputStream trace = null;
int exitCode = 0;
try {
Path scratchDir = new Path(ioPath, conf.get(GRIDMIX_OUT_DIR, "gridmix"));
// add shutdown hook for SIGINT, etc.
Runtime.getRuntime().addShutdownHook(sdh);
CountDownLatch startFlag = new CountDownLatch(1);
try {
// Create, start job submission threads
startThreads(conf, traceIn, ioPath, scratchDir, startFlag,
userResolver);
Path inputDir = getGridmixInputDataPath(ioPath);
// Write input data if specified
exitCode = writeInputData(genbytes, inputDir);
if (exitCode != 0) {
return exitCode;
}
// publish the data statistics
stats = GenerateData.publishDataStatistics(inputDir, genbytes, conf);
// scan input dir contents
submitter.refreshFilePool();
boolean shouldGenerate = (genbytes > 0);
// set up the needed things for emulation of various loads
exitCode = setupEmulation(conf, traceIn, scratchDir, ioPath,
shouldGenerate);
if (exitCode != 0) {
return exitCode;
}
// start the summarizer
summarizer.start(conf);
factory.start();
statistics.start();
} catch (Throwable e) {
LOG.error("Startup failed. " + e.toString() + "\n");
if (LOG.isDebugEnabled()) {
e.printStackTrace();
}
if (factory != null) factory.abort(); // abort pipeline
exitCode = STARTUP_FAILED_ERROR;
} finally {
// signal for factory to start; sets start time
startFlag.countDown();
}
if (factory != null) {
// wait for input exhaustion
factory.join(Long.MAX_VALUE);
final Throwable badTraceException = factory.error();
if (null != badTraceException) {
LOG.error("Error in trace", badTraceException);
throw new IOException("Error in trace", badTraceException);
}
// wait for pending tasks to be submitted
submitter.shutdown();
submitter.join(Long.MAX_VALUE);
// wait for running tasks to complete
monitor.shutdown();
monitor.join(Long.MAX_VALUE);
statistics.shutdown();
statistics.join(Long.MAX_VALUE);
}
} finally {
if (factory != null) {
summarizer.finalize(factory, traceIn, genbytes, userResolver, stats,
conf);
}
IOUtils.cleanup(LOG, trace);
}
return exitCode;
}
/**
* Create gridmix output directory. Setup things for emulation of
* various loads, if needed.
* @param conf gridmix configuration
* @param traceIn trace file path(if it is '-', then trace comes from the
* stream stdin)
* @param scratchDir gridmix output directory
* @param ioPath Working directory for gridmix.
* @param generate true if -generate option was specified
* @return exit code
* @throws IOException
* @throws InterruptedException
*/
private int setupEmulation(Configuration conf, String traceIn,
Path scratchDir, Path ioPath, boolean generate)
throws IOException, InterruptedException {
// create scratch directory(output directory of gridmix)
final FileSystem scratchFs = scratchDir.getFileSystem(conf);
FileSystem.mkdirs(scratchFs, scratchDir, new FsPermission((short) 0777));
// Setup things needed for emulation of distributed cache load
return setupDistCacheEmulation(conf, traceIn, ioPath, generate);
// Setup emulation of other loads like CPU load, Memory load
}
/**
* Setup gridmix for emulation of distributed cache load. This includes
* generation of distributed cache files, if needed.
* @param conf gridmix configuration
* @param traceIn trace file path(if it is '-', then trace comes from the
* stream stdin)
* @param ioPath <ioPath>/input/ is the dir where input data (a) exists
* or (b) is generated. <ioPath>/distributedCache/ is the
* folder where distributed cache data (a) exists or (b) is to be
* generated by gridmix.
* @param generate true if -generate option was specified
* @return exit code
* @throws IOException
* @throws InterruptedException
*/
private int setupDistCacheEmulation(Configuration conf, String traceIn,
Path ioPath, boolean generate) throws IOException, InterruptedException {
distCacheEmulator.init(traceIn, factory.jobCreator, generate);
int exitCode = 0;
if (distCacheEmulator.shouldGenerateDistCacheData() ||
distCacheEmulator.shouldEmulateDistCacheLoad()) {
JobStoryProducer jsp = createJobStoryProducer(traceIn, conf);
exitCode = distCacheEmulator.setupGenerateDistCacheData(jsp);
if (exitCode == 0) {
// If there are files to be generated, run a MapReduce job to generate
// these distributed cache files of all the simulated jobs of this trace.
writeDistCacheData(conf);
}
}
return exitCode;
}
/**
* Handles orderly shutdown by requesting that each component in the
* pipeline abort its progress, waiting for each to exit and killing
* any jobs still running on the cluster.
*/
class Shutdown extends Thread {
static final long FAC_SLEEP = 1000;
static final long SUB_SLEEP = 4000;
static final long MON_SLEEP = 15000;
private void killComponent(Component<?> component, long maxwait) {
if (component == null) {
return;
}
component.abort();
try {
component.join(maxwait);
} catch (InterruptedException e) {
LOG.warn("Interrupted waiting for " + component);
}
}
@Override
public void run() {
LOG.info("Exiting...");
try {
killComponent(factory, FAC_SLEEP); // read no more tasks
killComponent(submitter, SUB_SLEEP); // submit no more tasks
killComponent(monitor, MON_SLEEP); // process remaining jobs here
        killComponent(statistics, MON_SLEEP);
} finally {
if (monitor == null) {
return;
}
List<JobStats> remainingJobs = monitor.getRemainingJobs();
if (remainingJobs.isEmpty()) {
return;
}
LOG.info("Killing running jobs...");
for (JobStats stats : remainingJobs) {
Job job = stats.getJob();
try {
if (!job.isComplete()) {
job.killJob();
LOG.info("Killed " + job.getJobName() + " (" + job.getJobID() + ")");
} else {
if (job.isSuccessful()) {
monitor.onSuccess(job);
} else {
monitor.onFailure(job);
}
}
} catch (IOException e) {
LOG.warn("Failure killing " + job.getJobName(), e);
} catch (Exception e) {
LOG.error("Unexpected exception", e);
}
}
LOG.info("Done.");
}
}
}
public static void main(String[] argv) throws Exception {
int res = -1;
try {
res = ToolRunner.run(new Configuration(), new Gridmix(argv), argv);
} finally {
ExitUtil.terminate(res);
}
}
private String getEnumValues(Enum<?>[] e) {
StringBuilder sb = new StringBuilder();
String sep = "";
for (Enum<?> v : e) {
sb.append(sep);
sb.append(v.name());
sep = "|";
}
return sb.toString();
}
private String getJobTypes() {
return getEnumValues(JobCreator.values());
}
private String getSubmissionPolicies() {
return getEnumValues(GridmixJobSubmissionPolicy.values());
}
protected void printUsage(PrintStream out) {
ToolRunner.printGenericCommandUsage(out);
out.println("Usage: gridmix [-generate <MiB>] [-users URI] [-Dname=value ...] <iopath> <trace>");
out.println(" e.g. gridmix -generate 100m foo -");
out.println("Options:");
out.println(" -generate <MiB> : Generate input data of size MiB under "
+ "<iopath>/input/ and generate\n\t\t distributed cache data under "
+ "<iopath>/distributedCache/.");
out.println(" -users <usersResourceURI> : URI that contains the users list.");
out.println("Configuration parameters:");
out.println(" General parameters:");
out.printf(" %-48s : Output directory%n", GRIDMIX_OUT_DIR);
out.printf(" %-48s : Submitting threads%n", GRIDMIX_SUB_THR);
out.printf(" %-48s : Queued job desc%n", GRIDMIX_QUE_DEP);
out.printf(" %-48s : User resolution class%n", GRIDMIX_USR_RSV);
out.printf(" %-48s : Job types (%s)%n", JobCreator.GRIDMIX_JOB_TYPE, getJobTypes());
out.println(" Parameters related to job submission:");
out.printf(" %-48s : Default queue%n",
GridmixJob.GRIDMIX_DEFAULT_QUEUE);
out.printf(" %-48s : Enable/disable using queues in trace%n",
GridmixJob.GRIDMIX_USE_QUEUE_IN_TRACE);
out.printf(" %-48s : Job submission policy (%s)%n",
GridmixJobSubmissionPolicy.JOB_SUBMISSION_POLICY, getSubmissionPolicies());
out.println(" Parameters specific for LOADJOB:");
out.printf(" %-48s : Key fraction of rec%n",
AvgRecordFactory.GRIDMIX_KEY_FRC);
out.println(" Parameters specific for SLEEPJOB:");
out.printf(" %-48s : Whether to ignore reduce tasks%n",
SleepJob.SLEEPJOB_MAPTASK_ONLY);
out.printf(" %-48s : Number of fake locations for map tasks%n",
JobCreator.SLEEPJOB_RANDOM_LOCATIONS);
out.printf(" %-48s : Maximum map task runtime in mili-sec%n",
SleepJob.GRIDMIX_SLEEP_MAX_MAP_TIME);
out.printf(" %-48s : Maximum reduce task runtime in mili-sec (merge+reduce)%n",
SleepJob.GRIDMIX_SLEEP_MAX_REDUCE_TIME);
out.println(" Parameters specific for STRESS submission throttling policy:");
out.printf(" %-48s : jobs vs task-tracker ratio%n",
StressJobFactory.CONF_MAX_JOB_TRACKER_RATIO);
out.printf(" %-48s : maps vs map-slot ratio%n",
StressJobFactory.CONF_OVERLOAD_MAPTASK_MAPSLOT_RATIO);
out.printf(" %-48s : reduces vs reduce-slot ratio%n",
StressJobFactory.CONF_OVERLOAD_REDUCETASK_REDUCESLOT_RATIO);
out.printf(" %-48s : map-slot share per job%n",
StressJobFactory.CONF_MAX_MAPSLOT_SHARE_PER_JOB);
out.printf(" %-48s : reduce-slot share per job%n",
StressJobFactory.CONF_MAX_REDUCESLOT_SHARE_PER_JOB);
}
/**
* Components in the pipeline must support the following operations for
* orderly startup and shutdown.
*/
interface Component<T> {
/**
* Accept an item into this component from an upstream component. If
* shutdown or abort have been called, this may fail, depending on the
* semantics for the component.
*/
void add(T item) throws InterruptedException;
/**
* Attempt to start the service.
*/
void start();
/**
* Wait until the service completes. It is assumed that either a
* {@link #shutdown} or {@link #abort} has been requested.
*/
void join(long millis) throws InterruptedException;
/**
* Shut down gracefully, finishing all pending work. Reject new requests.
*/
void shutdown();
/**
* Shut down immediately, aborting any work in progress and discarding
* all pending work. It is legal to store pending work for another
* thread to process.
*/
void abort();
}
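  // A sketch of the lifecycle implied by the Component contract above
  // (ordering only; not an additional requirement of the interface):
  //   component.start();             // begin the service
  //   component.add(item);           // feed work from the upstream component
  //   component.shutdown();          // or component.abort() to discard work
  //   component.join(timeoutMillis); // wait for the service to wind down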
  // it is needed for tests
protected Summarizer getSummarizer() {
return summarizer;
}
}
| 30,016 | 35.785539 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/CompressionEmulationUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.charset.Charset;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Utils;
import org.apache.hadoop.mapred.gridmix.GenerateData.DataStatistics;
import org.apache.hadoop.mapred.gridmix.GenerateData.GenDataFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
/**
* This is a utility class for all the compression related modules.
*/
class CompressionEmulationUtil {
static final Log LOG = LogFactory.getLog(CompressionEmulationUtil.class);
/**
* Enable compression usage in GridMix runs.
*/
private static final String COMPRESSION_EMULATION_ENABLE =
"gridmix.compression-emulation.enable";
/**
* Enable input data decompression.
*/
private static final String INPUT_DECOMPRESSION_EMULATION_ENABLE =
"gridmix.compression-emulation.input-decompression.enable";
/**
* Configuration property for setting the compression ratio for map input
* data.
*/
private static final String GRIDMIX_MAP_INPUT_COMPRESSION_RATIO =
"gridmix.compression-emulation.map-input.decompression-ratio";
/**
* Configuration property for setting the compression ratio of map output.
*/
private static final String GRIDMIX_MAP_OUTPUT_COMPRESSION_RATIO =
"gridmix.compression-emulation.map-output.compression-ratio";
/**
* Configuration property for setting the compression ratio of job output.
*/
private static final String GRIDMIX_JOB_OUTPUT_COMPRESSION_RATIO =
"gridmix.compression-emulation.job-output.compression-ratio";
/**
* Default compression ratio.
*/
static final float DEFAULT_COMPRESSION_RATIO = 0.5F;
private static final CompressionRatioLookupTable COMPRESSION_LOOKUP_TABLE =
new CompressionRatioLookupTable();
private static final Charset charsetUTF8 = Charset.forName("UTF-8");
/**
* This is a {@link Mapper} implementation for generating random text data.
* It uses {@link RandomTextDataGenerator} for generating text data and the
* output files are compressed.
*/
public static class RandomTextDataMapper
extends Mapper<NullWritable, LongWritable, Text, Text> {
private RandomTextDataGenerator rtg;
@Override
protected void setup(Context context)
throws IOException, InterruptedException {
Configuration conf = context.getConfiguration();
int listSize =
RandomTextDataGenerator.getRandomTextDataGeneratorListSize(conf);
int wordSize =
RandomTextDataGenerator.getRandomTextDataGeneratorWordSize(conf);
rtg = new RandomTextDataGenerator(listSize, wordSize);
}
/**
     * Emits a random sequence of words of the desired total size. Note that
     * the desired output size is passed as the value parameter to this map.
*/
@Override
public void map(NullWritable key, LongWritable value, Context context)
throws IOException, InterruptedException {
//TODO Control the extra data written ..
//TODO Should the key\tvalue\n be considered for measuring size?
// Can counters like BYTES_WRITTEN be used? What will be the value of
// such counters in LocalJobRunner?
for (long bytes = value.get(); bytes > 0;) {
String randomKey = rtg.getRandomWord();
String randomValue = rtg.getRandomWord();
context.write(new Text(randomKey), new Text(randomValue));
bytes -= (randomValue.getBytes(charsetUTF8).length +
randomKey.getBytes(charsetUTF8).length);
}
}
}
/**
* Configure the {@link Job} for enabling compression emulation.
*/
static void configure(final Job job) throws IOException, InterruptedException,
ClassNotFoundException {
// set the random text mapper
job.setMapperClass(RandomTextDataMapper.class);
job.setNumReduceTasks(0);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
job.setInputFormatClass(GenDataFormat.class);
job.setJarByClass(GenerateData.class);
// set the output compression true
FileOutputFormat.setCompressOutput(job, true);
try {
FileInputFormat.addInputPath(job, new Path("ignored"));
} catch (IOException e) {
LOG.error("Error while adding input path ", e);
}
}
/**
* This is the lookup table for mapping compression ratio to the size of the
* word in the {@link RandomTextDataGenerator}'s dictionary.
*
* Note that this table is computed (empirically) using a dictionary of
* default length i.e {@value RandomTextDataGenerator#DEFAULT_LIST_SIZE}.
*/
private static class CompressionRatioLookupTable {
private static Map<Float, Integer> map = new HashMap<Float, Integer>(60);
private static final float MIN_RATIO = 0.07F;
private static final float MAX_RATIO = 0.68F;
// add the empirically obtained data points in the lookup table
CompressionRatioLookupTable() {
map.put(.07F,30);
map.put(.08F,25);
map.put(.09F,60);
map.put(.10F,20);
map.put(.11F,70);
map.put(.12F,15);
map.put(.13F,80);
map.put(.14F,85);
map.put(.15F,90);
map.put(.16F,95);
map.put(.17F,100);
map.put(.18F,105);
map.put(.19F,110);
map.put(.20F,115);
map.put(.21F,120);
map.put(.22F,125);
map.put(.23F,130);
map.put(.24F,140);
map.put(.25F,145);
map.put(.26F,150);
map.put(.27F,155);
map.put(.28F,160);
map.put(.29F,170);
map.put(.30F,175);
map.put(.31F,180);
map.put(.32F,190);
map.put(.33F,195);
map.put(.34F,205);
map.put(.35F,215);
map.put(.36F,225);
map.put(.37F,230);
map.put(.38F,240);
map.put(.39F,250);
map.put(.40F,260);
map.put(.41F,270);
map.put(.42F,280);
map.put(.43F,295);
map.put(.44F,310);
map.put(.45F,325);
map.put(.46F,335);
map.put(.47F,355);
map.put(.48F,375);
map.put(.49F,395);
map.put(.50F,420);
map.put(.51F,440);
map.put(.52F,465);
map.put(.53F,500);
map.put(.54F,525);
map.put(.55F,550);
map.put(.56F,600);
map.put(.57F,640);
map.put(.58F,680);
map.put(.59F,734);
map.put(.60F,813);
map.put(.61F,905);
map.put(.62F,1000);
map.put(.63F,1055);
map.put(.64F,1160);
map.put(.65F,1355);
map.put(.66F,1510);
map.put(.67F,1805);
map.put(.68F,2170);
}
/**
* Returns the size of the word in {@link RandomTextDataGenerator}'s
* dictionary that can generate text with the desired compression ratio.
*
* @throws RuntimeException If ratio is less than {@value #MIN_RATIO} or
* greater than {@value #MAX_RATIO}.
*/
int getWordSizeForRatio(float ratio) {
ratio = standardizeCompressionRatio(ratio);
if (ratio >= MIN_RATIO && ratio <= MAX_RATIO) {
return map.get(ratio);
} else {
throw new RuntimeException("Compression ratio should be in the range ["
+ MIN_RATIO + "," + MAX_RATIO + "]. Configured compression ratio is "
+ ratio + ".");
}
}
}
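  // Worked example from the table above: getWordSizeForRatio(0.50f) returns
  // 420, while any ratio outside [0.07, 0.68] (after rounding to 2 decimal
  // places) results in the RuntimeException described above.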
/**
* Setup the data generator's configuration to generate compressible random
* text data with the desired compression ratio.
   * If a compression ratio is configured, the
   * {@link RandomTextDataGenerator}'s word-size is looked up from an
   * empirical table keyed by that ratio.
   *
   * Hence, to achieve the desired compression ratio, the
   * {@link RandomTextDataGenerator}'s list-size is set to the default
   * value i.e {@value RandomTextDataGenerator#DEFAULT_LIST_SIZE}, with which
   * the empirical table was computed.
*/
static void setupDataGeneratorConfig(Configuration conf) {
boolean compress = isCompressionEmulationEnabled(conf);
if (compress) {
float ratio = getMapInputCompressionEmulationRatio(conf);
LOG.info("GridMix is configured to generate compressed input data with "
+ " a compression ratio of " + ratio);
int wordSize = COMPRESSION_LOOKUP_TABLE.getWordSizeForRatio(ratio);
RandomTextDataGenerator.setRandomTextDataGeneratorWordSize(conf,
wordSize);
// since the compression ratios are computed using the default value of
// list size
RandomTextDataGenerator.setRandomTextDataGeneratorListSize(conf,
RandomTextDataGenerator.DEFAULT_LIST_SIZE);
}
}
/**
* Returns a {@link RandomTextDataGenerator} that generates random
* compressible text with the desired compression ratio.
*/
static RandomTextDataGenerator getRandomTextDataGenerator(float ratio,
long seed) {
int wordSize = COMPRESSION_LOOKUP_TABLE.getWordSizeForRatio(ratio);
RandomTextDataGenerator rtg =
new RandomTextDataGenerator(RandomTextDataGenerator.DEFAULT_LIST_SIZE,
seed, wordSize);
return rtg;
}
/** Publishes compression related data statistics. Following statistics are
* published
* <ul>
* <li>Total compressed input data size</li>
* <li>Number of compressed input data files</li>
* <li>Compression Ratio</li>
* <li>Text data dictionary size</li>
* <li>Random text word size</li>
* </ul>
*/
static DataStatistics publishCompressedDataStatistics(Path inputDir,
Configuration conf, long uncompressedDataSize)
throws IOException {
FileSystem fs = inputDir.getFileSystem(conf);
CompressionCodecFactory compressionCodecs =
new CompressionCodecFactory(conf);
// iterate over compressed files and sum up the compressed file sizes
long compressedDataSize = 0;
int numCompressedFiles = 0;
// obtain input data file statuses
FileStatus[] outFileStatuses =
fs.listStatus(inputDir, new Utils.OutputFileUtils.OutputFilesFilter());
for (FileStatus status : outFileStatuses) {
// check if the input file is compressed
if (compressionCodecs != null) {
CompressionCodec codec = compressionCodecs.getCodec(status.getPath());
if (codec != null) {
++numCompressedFiles;
compressedDataSize += status.getLen();
}
}
}
LOG.info("Gridmix is configured to use compressed input data.");
// publish the input data size
LOG.info("Total size of compressed input data : "
+ StringUtils.humanReadableInt(compressedDataSize));
LOG.info("Total number of compressed input data files : "
+ numCompressedFiles);
if (numCompressedFiles == 0) {
throw new RuntimeException("No compressed file found in the input"
+ " directory : " + inputDir.toString() + ". To enable compression"
+ " emulation, run Gridmix either with "
+ " an input directory containing compressed input file(s) or"
+ " use the -generate option to (re)generate it. If compression"
+ " emulation is not desired, disable it by setting '"
+ COMPRESSION_EMULATION_ENABLE + "' to 'false'.");
}
    // publish the compression ratio only if the data was generated in this
    // gridmix run
if (uncompressedDataSize > 0) {
// compute the compression ratio
double ratio = ((double)compressedDataSize) / uncompressedDataSize;
// publish the compression ratio
LOG.info("Input Data Compression Ratio : " + ratio);
}
return new DataStatistics(compressedDataSize, numCompressedFiles, true);
}
/**
* Enables/Disables compression emulation.
* @param conf Target configuration where the parameter
* {@value #COMPRESSION_EMULATION_ENABLE} will be set.
* @param val The value to be set.
*/
static void setCompressionEmulationEnabled(Configuration conf, boolean val) {
conf.setBoolean(COMPRESSION_EMULATION_ENABLE, val);
}
/**
* Checks if compression emulation is enabled or not. Default is {@code true}.
*/
static boolean isCompressionEmulationEnabled(Configuration conf) {
return conf.getBoolean(COMPRESSION_EMULATION_ENABLE, true);
}
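  // A minimal sketch of toggling compression emulation; the explicit
  // setBoolean call is equivalent to using the setter above:
  //   CompressionEmulationUtil.setCompressionEmulationEnabled(conf, false);
  //   // or: conf.setBoolean("gridmix.compression-emulation.enable", false);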
/**
* Enables/Disables input decompression emulation.
* @param conf Target configuration where the parameter
* {@value #INPUT_DECOMPRESSION_EMULATION_ENABLE} will be set.
* @param val The value to be set.
*/
static void setInputCompressionEmulationEnabled(Configuration conf,
boolean val) {
conf.setBoolean(INPUT_DECOMPRESSION_EMULATION_ENABLE, val);
}
/**
* Check if input decompression emulation is enabled or not.
* Default is {@code false}.
*/
static boolean isInputCompressionEmulationEnabled(Configuration conf) {
return conf.getBoolean(INPUT_DECOMPRESSION_EMULATION_ENABLE, false);
}
/**
* Set the map input data compression ratio in the given conf.
*/
static void setMapInputCompressionEmulationRatio(Configuration conf,
float ratio) {
conf.setFloat(GRIDMIX_MAP_INPUT_COMPRESSION_RATIO, ratio);
}
/**
* Get the map input data compression ratio using the given configuration.
* If the compression ratio is not set in the configuration then use the
* default value i.e {@value #DEFAULT_COMPRESSION_RATIO}.
*/
static float getMapInputCompressionEmulationRatio(Configuration conf) {
return conf.getFloat(GRIDMIX_MAP_INPUT_COMPRESSION_RATIO,
DEFAULT_COMPRESSION_RATIO);
}
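  // Illustrative only: configure a 0.35 map input compression ratio and read
  // it back (0.5, the default, is returned when nothing is configured):
  //   CompressionEmulationUtil.setMapInputCompressionEmulationRatio(conf, 0.35f);
  //   float r = CompressionEmulationUtil.getMapInputCompressionEmulationRatio(conf);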
/**
* Set the map output data compression ratio in the given configuration.
*/
static void setMapOutputCompressionEmulationRatio(Configuration conf,
float ratio) {
conf.setFloat(GRIDMIX_MAP_OUTPUT_COMPRESSION_RATIO, ratio);
}
/**
* Get the map output data compression ratio using the given configuration.
* If the compression ratio is not set in the configuration then use the
* default value i.e {@value #DEFAULT_COMPRESSION_RATIO}.
*/
static float getMapOutputCompressionEmulationRatio(Configuration conf) {
return conf.getFloat(GRIDMIX_MAP_OUTPUT_COMPRESSION_RATIO,
DEFAULT_COMPRESSION_RATIO);
}
/**
* Set the job output data compression ratio in the given configuration.
*/
static void setJobOutputCompressionEmulationRatio(Configuration conf,
float ratio) {
conf.setFloat(GRIDMIX_JOB_OUTPUT_COMPRESSION_RATIO, ratio);
}
/**
* Get the job output data compression ratio using the given configuration.
* If the compression ratio is not set in the configuration then use the
* default value i.e {@value #DEFAULT_COMPRESSION_RATIO}.
*/
static float getJobOutputCompressionEmulationRatio(Configuration conf) {
return conf.getFloat(GRIDMIX_JOB_OUTPUT_COMPRESSION_RATIO,
DEFAULT_COMPRESSION_RATIO);
}
/**
   * Standardize the compression ratio i.e. round off the compression ratio
   * to 2 decimal places.
*/
static float standardizeCompressionRatio(float ratio) {
// round off to 2 significant digits
int significant = (int)Math.round(ratio * 100);
return ((float)significant)/100;
}
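  // Worked example: standardizeCompressionRatio(0.456f) computes
  // Math.round(45.6) = 46 and returns 0.46f.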
/**
   * Returns an {@link InputStream} for a file that might be compressed.
*/
static InputStream getPossiblyDecompressedInputStream(Path file,
Configuration conf,
long offset)
throws IOException {
FileSystem fs = file.getFileSystem(conf);
if (isCompressionEmulationEnabled(conf)
&& isInputCompressionEmulationEnabled(conf)) {
CompressionCodecFactory compressionCodecs =
new CompressionCodecFactory(conf);
CompressionCodec codec = compressionCodecs.getCodec(file);
if (codec != null) {
Decompressor decompressor = CodecPool.getDecompressor(codec);
if (decompressor != null) {
CompressionInputStream in =
codec.createInputStream(fs.open(file), decompressor);
          //TODO Seek doesn't work with a compressed input stream.
// Use SplittableCompressionCodec?
return (InputStream)in;
}
}
}
FSDataInputStream in = fs.open(file);
in.seek(offset);
return (InputStream)in;
}
/**
   * Returns an {@link OutputStream} for a file that might need
* compression.
*/
static OutputStream getPossiblyCompressedOutputStream(Path file,
Configuration conf)
throws IOException {
FileSystem fs = file.getFileSystem(conf);
JobConf jConf = new JobConf(conf);
if (org.apache.hadoop.mapred.FileOutputFormat.getCompressOutput(jConf)) {
// get the codec class
Class<? extends CompressionCodec> codecClass =
org.apache.hadoop.mapred.FileOutputFormat
.getOutputCompressorClass(jConf,
GzipCodec.class);
// get the codec implementation
CompressionCodec codec = ReflectionUtils.newInstance(codecClass, conf);
// add the appropriate extension
file = file.suffix(codec.getDefaultExtension());
if (isCompressionEmulationEnabled(conf)) {
FSDataOutputStream fileOut = fs.create(file, false);
return new DataOutputStream(codec.createOutputStream(fileOut));
}
}
return fs.create(file, false);
}
/**
* Extracts compression/decompression related configuration parameters from
* the source configuration to the target configuration.
*/
static void configureCompressionEmulation(Configuration source,
Configuration target) {
// enable output compression
target.setBoolean(FileOutputFormat.COMPRESS,
source.getBoolean(FileOutputFormat.COMPRESS, false));
// set the job output compression codec
String jobOutputCompressionCodec =
source.get(FileOutputFormat.COMPRESS_CODEC);
if (jobOutputCompressionCodec != null) {
target.set(FileOutputFormat.COMPRESS_CODEC, jobOutputCompressionCodec);
}
// set the job output compression type
String jobOutputCompressionType =
source.get(FileOutputFormat.COMPRESS_TYPE);
if (jobOutputCompressionType != null) {
target.set(FileOutputFormat.COMPRESS_TYPE, jobOutputCompressionType);
}
// enable map output compression
target.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,
source.getBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, false));
// set the map output compression codecs
String mapOutputCompressionCodec =
source.get(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC);
if (mapOutputCompressionCodec != null) {
target.set(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC,
mapOutputCompressionCodec);
}
// enable input decompression
//TODO replace with mapInputBytes and hdfsBytesRead
Path[] inputs =
org.apache.hadoop.mapred.FileInputFormat
.getInputPaths(new JobConf(source));
boolean needsCompressedInput = false;
CompressionCodecFactory compressionCodecs =
new CompressionCodecFactory(source);
for (Path input : inputs) {
CompressionCodec codec = compressionCodecs.getCodec(input);
if (codec != null) {
needsCompressedInput = true;
}
}
setInputCompressionEmulationEnabled(target, needsCompressedInput);
}
/**
* Get the uncompressed input bytes count from the given possibly compressed
* input bytes count.
* @param possiblyCompressedInputBytes input bytes count. This is compressed
* input size if compression emulation is on.
* @param conf configuration of the Gridmix simulated job
   * @return uncompressed input bytes count, scaled up using the map input
   *         compression ratio if compressed input was used
*/
static long getUncompressedInputBytes(long possiblyCompressedInputBytes,
Configuration conf) {
long uncompressedInputBytes = possiblyCompressedInputBytes;
if (CompressionEmulationUtil.isInputCompressionEmulationEnabled(conf)) {
float inputCompressionRatio =
CompressionEmulationUtil.getMapInputCompressionEmulationRatio(conf);
uncompressedInputBytes /= inputCompressionRatio;
}
return uncompressedInputBytes;
}
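  // Worked example: with input decompression emulation enabled and a map
  // input compression ratio of 0.5, a reported 50 MB of compressed input
  // bytes is scaled back to roughly 100 MB of uncompressed input.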
}
| 22,690 | 36.818333 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/IntermediateRecordFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
/**
* Factory passing reduce specification as its last record.
*/
class IntermediateRecordFactory extends RecordFactory {
private final GridmixKey.Spec spec;
private final RecordFactory factory;
private final int partition;
private final long targetRecords;
private boolean done = false;
private long accRecords = 0L;
/**
* @param targetBytes Expected byte count.
* @param targetRecords Expected record count; will emit spec records after
* this boundary is passed.
* @param partition Reduce to which records are emitted.
* @param spec Specification to emit.
* @param conf Unused.
*/
public IntermediateRecordFactory(long targetBytes, long targetRecords,
int partition, GridmixKey.Spec spec, Configuration conf) {
this(new AvgRecordFactory(targetBytes, targetRecords, conf), partition,
targetRecords, spec, conf);
}
/**
* @param factory Factory from which byte/record counts are obtained.
* @param partition Reduce to which records are emitted.
* @param targetRecords Expected record count; will emit spec records after
* this boundary is passed.
* @param spec Specification to emit.
* @param conf Unused.
*/
public IntermediateRecordFactory(RecordFactory factory, int partition,
long targetRecords, GridmixKey.Spec spec, Configuration conf) {
this.spec = spec;
this.factory = factory;
this.partition = partition;
this.targetRecords = targetRecords;
}
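  // Usage sketch (values are illustrative): emit ~16 MB across 1000 records
  // destined for reduce #3, appending the given spec as the final record:
  //   new IntermediateRecordFactory(16L << 20, 1000L, 3, spec, conf);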
@Override
public boolean next(GridmixKey key, GridmixRecord val) throws IOException {
assert key != null;
final boolean rslt = factory.next(key, val);
++accRecords;
if (rslt) {
if (accRecords < targetRecords) {
key.setType(GridmixKey.DATA);
} else {
final int orig = key.getSize();
key.setType(GridmixKey.REDUCE_SPEC);
spec.rec_in = accRecords;
key.setSpec(spec);
val.setSize(val.getSize() - (key.getSize() - orig));
// reset counters
accRecords = 0L;
spec.bytes_out = 0L;
spec.rec_out = 0L;
done = true;
}
} else if (!done) {
// ensure spec emitted
key.setType(GridmixKey.REDUCE_SPEC);
key.setPartition(partition);
key.setSize(0);
val.setSize(0);
spec.rec_in = 0L;
key.setSpec(spec);
done = true;
return true;
}
key.setPartition(partition);
return rslt;
}
@Override
public float getProgress() throws IOException {
return factory.getProgress();
}
@Override
public void close() throws IOException {
factory.close();
}
}
| 3,570 | 31.171171 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixKey.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.tools.rumen.ResourceUsageMetrics;
class GridmixKey extends GridmixRecord {
static final byte REDUCE_SPEC = 0;
static final byte DATA = 1;
static final int META_BYTES = 1;
private byte type;
private int partition; // NOT serialized
private Spec spec = new Spec();
GridmixKey() {
this(DATA, 1, 0L);
}
GridmixKey(byte type, int size, long seed) {
super(size, seed);
this.type = type;
// setting type may change pcnt random bytes
setSize(size);
}
@Override
public int getSize() {
switch (type) {
case REDUCE_SPEC:
return super.getSize() + spec.getSize() + META_BYTES;
case DATA:
return super.getSize() + META_BYTES;
default:
throw new IllegalStateException("Invalid type: " + type);
}
}
@Override
public void setSize(int size) {
switch (type) {
case REDUCE_SPEC:
super.setSize(size - (META_BYTES + spec.getSize()));
break;
case DATA:
super.setSize(size - META_BYTES);
break;
default:
throw new IllegalStateException("Invalid type: " + type);
}
}
/**
* Partition is not serialized.
*/
public int getPartition() {
return partition;
}
public void setPartition(int partition) {
this.partition = partition;
}
public long getReduceInputRecords() {
assert REDUCE_SPEC == getType();
return spec.rec_in;
}
public void setReduceInputRecords(long rec_in) {
assert REDUCE_SPEC == getType();
final int origSize = getSize();
spec.rec_in = rec_in;
setSize(origSize);
}
public long getReduceOutputRecords() {
assert REDUCE_SPEC == getType();
return spec.rec_out;
}
public void setReduceOutputRecords(long rec_out) {
assert REDUCE_SPEC == getType();
final int origSize = getSize();
spec.rec_out = rec_out;
setSize(origSize);
}
public long getReduceOutputBytes() {
assert REDUCE_SPEC == getType();
return spec.bytes_out;
  }
public void setReduceOutputBytes(long b_out) {
assert REDUCE_SPEC == getType();
final int origSize = getSize();
spec.bytes_out = b_out;
setSize(origSize);
}
/**
* Get the {@link ResourceUsageMetrics} stored in the key.
*/
public ResourceUsageMetrics getReduceResourceUsageMetrics() {
assert REDUCE_SPEC == getType();
return spec.metrics;
}
/**
* Store the {@link ResourceUsageMetrics} in the key.
*/
public void setReduceResourceUsageMetrics(ResourceUsageMetrics metrics) {
assert REDUCE_SPEC == getType();
spec.setResourceUsageSpecification(metrics);
}
public byte getType() {
return type;
}
public void setType(byte type) throws IOException {
final int origSize = getSize();
switch (type) {
case REDUCE_SPEC:
case DATA:
this.type = type;
break;
default:
throw new IOException("Invalid type: " + type);
}
setSize(origSize);
}
public void setSpec(Spec spec) {
assert REDUCE_SPEC == getType();
final int origSize = getSize();
this.spec.set(spec);
setSize(origSize);
}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
setType(in.readByte());
if (REDUCE_SPEC == getType()) {
spec.readFields(in);
}
}
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
final byte t = getType();
out.writeByte(t);
if (REDUCE_SPEC == t) {
spec.write(out);
}
}
int fixedBytes() {
return super.fixedBytes() +
(REDUCE_SPEC == getType() ? spec.getSize() : 0) + META_BYTES;
}
@Override
public int compareTo(GridmixRecord other) {
final GridmixKey o = (GridmixKey) other;
final byte t1 = getType();
final byte t2 = o.getType();
if (t1 != t2) {
return t1 - t2;
}
return super.compareTo(other);
}
/**
* Note that while the spec is not explicitly included, changing the spec
* may change its size, which will affect equality.
*/
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other != null && other.getClass() == getClass()) {
final GridmixKey o = ((GridmixKey)other);
return getType() == o.getType() && super.equals(o);
}
return false;
}
@Override
public int hashCode() {
return super.hashCode() ^ getType();
}
public static class Spec implements Writable {
long rec_in;
long rec_out;
long bytes_out;
private ResourceUsageMetrics metrics = null;
private int sizeOfResourceUsageMetrics = 0;
public Spec() { }
public void set(Spec other) {
rec_in = other.rec_in;
bytes_out = other.bytes_out;
rec_out = other.rec_out;
setResourceUsageSpecification(other.metrics);
}
/**
* Sets the {@link ResourceUsageMetrics} for this {@link Spec}.
*/
public void setResourceUsageSpecification(ResourceUsageMetrics metrics) {
this.metrics = metrics;
if (metrics != null) {
this.sizeOfResourceUsageMetrics = metrics.size();
} else {
this.sizeOfResourceUsageMetrics = 0;
}
}
public int getSize() {
return WritableUtils.getVIntSize(rec_in) +
WritableUtils.getVIntSize(rec_out) +
WritableUtils.getVIntSize(bytes_out) +
WritableUtils.getVIntSize(sizeOfResourceUsageMetrics) +
sizeOfResourceUsageMetrics;
}
@Override
public void readFields(DataInput in) throws IOException {
rec_in = WritableUtils.readVLong(in);
rec_out = WritableUtils.readVLong(in);
bytes_out = WritableUtils.readVLong(in);
sizeOfResourceUsageMetrics = WritableUtils.readVInt(in);
if (sizeOfResourceUsageMetrics > 0) {
metrics = new ResourceUsageMetrics();
metrics.readFields(in);
}
}
@Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeVLong(out, rec_in);
WritableUtils.writeVLong(out, rec_out);
WritableUtils.writeVLong(out, bytes_out);
WritableUtils.writeVInt(out, sizeOfResourceUsageMetrics);
if (sizeOfResourceUsageMetrics > 0) {
metrics.write(out);
}
}
}
public static class Comparator extends GridmixRecord.Comparator {
private final DataInputBuffer di = new DataInputBuffer();
private final byte[] reset = di.getData();
public Comparator() {
super(GridmixKey.class);
}
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
try {
di.reset(b1, s1, l1);
final int x1 = WritableUtils.readVInt(di);
di.reset(b2, s2, l2);
final int x2 = WritableUtils.readVInt(di);
final int ret = (b1[s1 + x1] != b2[s2 + x2])
? b1[s1 + x1] - b2[s2 + x2]
: super.compare(b1, s1, x1, b2, s2, x2);
di.reset(reset, 0, 0);
return ret;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
static {
WritableComparator.define(GridmixKey.class, new Comparator());
}
}
}
| 8,271 | 26.390728 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.ResourceUsageMatcher;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.rumen.JobStory;
import org.apache.hadoop.tools.rumen.ResourceUsageMetrics;
import org.apache.hadoop.tools.rumen.TaskInfo;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
/**
* Synthetic job generated from a trace description.
*/
class LoadJob extends GridmixJob {
public static final Log LOG = LogFactory.getLog(LoadJob.class);
public LoadJob(final Configuration conf, long submissionMillis,
final JobStory jobdesc, Path outRoot, UserGroupInformation ugi,
final int seq) throws IOException {
super(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
}
public Job call() throws IOException, InterruptedException,
ClassNotFoundException {
ugi.doAs(
new PrivilegedExceptionAction<Job>() {
public Job run() throws IOException, ClassNotFoundException,
InterruptedException {
job.setMapperClass(LoadMapper.class);
job.setReducerClass(LoadReducer.class);
job.setNumReduceTasks(jobdesc.getNumberReduces());
job.setMapOutputKeyClass(GridmixKey.class);
job.setMapOutputValueClass(GridmixRecord.class);
job.setSortComparatorClass(LoadSortComparator.class);
job.setGroupingComparatorClass(SpecGroupingComparator.class);
job.setInputFormatClass(LoadInputFormat.class);
job.setOutputFormatClass(RawBytesOutputFormat.class);
job.setPartitionerClass(DraftPartitioner.class);
job.setJarByClass(LoadJob.class);
job.getConfiguration().setBoolean(Job.USED_GENERIC_PARSER, true);
FileOutputFormat.setOutputPath(job, outdir);
job.submit();
return job;
}
});
return job;
}
@Override
protected boolean canEmulateCompression() {
return true;
}
/**
* This is a load matching key comparator which will make sure that the
* resource usage load is matched even when the framework is in control.
*/
public static class LoadSortComparator extends GridmixKey.Comparator {
private ResourceUsageMatcherRunner matcher = null;
private boolean isConfigured = false;
public LoadSortComparator() {
super();
}
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
configure();
int ret = super.compare(b1, s1, l1, b2, s2, l2);
if (matcher != null) {
try {
matcher.match(); // match the resource usage now
} catch (Exception e) {}
}
return ret;
}
//TODO Note that the sorter will be instantiated 2 times as follows
// 1. During the sort/spill in the map phase
// 2. During the merge in the sort phase
// We need the handle to the matcher thread only in (2).
// This logic can be relaxed to run only in (2).
private void configure() {
if (!isConfigured) {
ThreadGroup group = Thread.currentThread().getThreadGroup();
Thread[] threads = new Thread[group.activeCount() * 2];
group.enumerate(threads, true);
for (Thread t : threads) {
if (t != null && (t instanceof ResourceUsageMatcherRunner)) {
this.matcher = (ResourceUsageMatcherRunner) t;
isConfigured = true;
break;
}
}
}
}
}
/**
* This is a progress based resource usage matcher.
*/
@SuppressWarnings("unchecked")
static class ResourceUsageMatcherRunner extends Thread
implements Progressive {
private final ResourceUsageMatcher matcher;
private final BoostingProgress progress;
private final long sleepTime;
private static final String SLEEP_CONFIG =
"gridmix.emulators.resource-usage.sleep-duration";
private static final long DEFAULT_SLEEP_TIME = 100; // 100ms
/**
* This is a progress bar that can be boosted for weaker use-cases.
*/
private static class BoostingProgress implements Progressive {
private float boostValue = 0f;
TaskInputOutputContext context;
BoostingProgress(TaskInputOutputContext context) {
this.context = context;
}
void setBoostValue(float boostValue) {
this.boostValue = boostValue;
}
@Override
public float getProgress() {
return Math.min(1f, context.getProgress() + boostValue);
}
}
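    // Worked example for BoostingProgress: with context.getProgress() at 0.6
    // and a boost value of 0.3, getProgress() reports 0.9; the sum is capped
    // at 1f by the Math.min above.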
ResourceUsageMatcherRunner(final TaskInputOutputContext context,
ResourceUsageMetrics metrics) {
Configuration conf = context.getConfiguration();
// set the resource calculator plugin
Class<? extends ResourceCalculatorPlugin> clazz =
conf.getClass(TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN,
null, ResourceCalculatorPlugin.class);
ResourceCalculatorPlugin plugin =
ResourceCalculatorPlugin.getResourceCalculatorPlugin(clazz, conf);
// set the other parameters
this.sleepTime = conf.getLong(SLEEP_CONFIG, DEFAULT_SLEEP_TIME);
progress = new BoostingProgress(context);
// instantiate a resource-usage-matcher
matcher = new ResourceUsageMatcher();
matcher.configure(conf, plugin, metrics, progress);
}
protected void match() throws IOException, InterruptedException {
// match the resource usage
matcher.matchResourceUsage();
}
@Override
public void run() {
LOG.info("Resource usage matcher thread started.");
try {
while (progress.getProgress() < 1) {
// match
match();
// sleep for some time
try {
Thread.sleep(sleepTime);
} catch (Exception e) {}
}
// match for progress = 1
match();
LOG.info("Resource usage emulation complete! Matcher exiting");
} catch (Exception e) {
LOG.info("Exception while running the resource-usage-emulation matcher"
+ " thread! Exiting.", e);
}
}
@Override
public float getProgress() {
return matcher.getProgress();
}
    // boost the progress bar to speed up the emulation cycles
void boost(float value) {
progress.setBoostValue(value);
}
}
// Makes sure that the TaskTracker doesn't kill the map/reduce tasks while
// they are emulating
private static class StatusReporter extends Thread {
private final TaskAttemptContext context;
private final Progressive progress;
StatusReporter(TaskAttemptContext context, Progressive progress) {
this.context = context;
this.progress = progress;
}
@Override
public void run() {
LOG.info("Status reporter thread started.");
try {
while (!isInterrupted() && progress.getProgress() < 1) {
// report progress
context.progress();
// sleep for some time
try {
Thread.sleep(100); // sleep for 100ms
} catch (Exception e) {}
}
LOG.info("Status reporter thread exiting");
} catch (Exception e) {
LOG.info("Exception while running the status reporter thread!", e);
}
}
}
public static class LoadMapper
extends Mapper<NullWritable, GridmixRecord, GridmixKey, GridmixRecord> {
private double acc;
private double ratio;
private final ArrayList<RecordFactory> reduces =
new ArrayList<RecordFactory>();
private final Random r = new Random();
private final GridmixKey key = new GridmixKey();
private final GridmixRecord val = new GridmixRecord();
private ResourceUsageMatcherRunner matcher = null;
private StatusReporter reporter = null;
@Override
protected void setup(Context ctxt)
throws IOException, InterruptedException {
final Configuration conf = ctxt.getConfiguration();
final LoadSplit split = (LoadSplit) ctxt.getInputSplit();
final int maps = split.getMapCount();
final long[] reduceBytes = split.getOutputBytes();
final long[] reduceRecords = split.getOutputRecords();
long totalRecords = 0L;
final int nReduces = ctxt.getNumReduceTasks();
if (nReduces > 0) {
// enable gridmix map output record for compression
boolean emulateMapOutputCompression =
CompressionEmulationUtil.isCompressionEmulationEnabled(conf)
&& conf.getBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, false);
float compressionRatio = 1.0f;
if (emulateMapOutputCompression) {
compressionRatio =
CompressionEmulationUtil.getMapOutputCompressionEmulationRatio(conf);
LOG.info("GridMix is configured to use a compression ratio of "
+ compressionRatio + " for the map output data.");
key.setCompressibility(true, compressionRatio);
val.setCompressibility(true, compressionRatio);
}
int idx = 0;
int id = split.getId();
for (int i = 0; i < nReduces; ++i) {
final GridmixKey.Spec spec = new GridmixKey.Spec();
if (i == id) {
spec.bytes_out = split.getReduceBytes(idx);
spec.rec_out = split.getReduceRecords(idx);
spec.setResourceUsageSpecification(
split.getReduceResourceUsageMetrics(idx));
++idx;
id += maps;
}
// set the map output bytes such that the final reduce input bytes
// match the expected value obtained from the original job
long mapOutputBytes = reduceBytes[i];
if (emulateMapOutputCompression) {
mapOutputBytes /= compressionRatio;
}
reduces.add(new IntermediateRecordFactory(
new AvgRecordFactory(mapOutputBytes, reduceRecords[i], conf,
5*1024),
i, reduceRecords[i], spec, conf));
totalRecords += reduceRecords[i];
}
} else {
long mapOutputBytes = reduceBytes[0];
// enable gridmix job output compression
boolean emulateJobOutputCompression =
CompressionEmulationUtil.isCompressionEmulationEnabled(conf)
&& conf.getBoolean(FileOutputFormat.COMPRESS, false);
if (emulateJobOutputCompression) {
float compressionRatio =
CompressionEmulationUtil.getJobOutputCompressionEmulationRatio(conf);
LOG.info("GridMix is configured to use a compression ratio of "
+ compressionRatio + " for the job output data.");
key.setCompressibility(true, compressionRatio);
val.setCompressibility(true, compressionRatio);
// set the output size accordingly
mapOutputBytes /= compressionRatio;
}
reduces.add(new AvgRecordFactory(mapOutputBytes, reduceRecords[0],
conf, 5*1024));
totalRecords = reduceRecords[0];
}
final long splitRecords = split.getInputRecords();
int missingRecSize =
conf.getInt(AvgRecordFactory.GRIDMIX_MISSING_REC_SIZE, 64*1024);
final long inputRecords =
(splitRecords <= 0 && split.getLength() >= 0)
? Math.max(1, split.getLength() / missingRecSize)
: splitRecords;
ratio = totalRecords / (1.0 * inputRecords);
acc = 0.0;
matcher = new ResourceUsageMatcherRunner(ctxt,
split.getMapResourceUsageMetrics());
matcher.setDaemon(true);
// start the status reporter thread
reporter = new StatusReporter(ctxt, matcher);
reporter.setDaemon(true);
reporter.start();
}
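    // The map() below emits roughly 'ratio' output records per input record:
    // 'acc' accumulates the fractional record count and a record is written to
    // a randomly chosen reduce's record factory each time it crosses 1.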
@Override
public void map(NullWritable ignored, GridmixRecord rec,
Context context) throws IOException, InterruptedException {
acc += ratio;
while (acc >= 1.0 && !reduces.isEmpty()) {
key.setSeed(r.nextLong());
val.setSeed(r.nextLong());
final int idx = r.nextInt(reduces.size());
final RecordFactory f = reduces.get(idx);
if (!f.next(key, val)) {
reduces.remove(idx);
continue;
}
context.write(key, val);
acc -= 1.0;
// match inline
try {
matcher.match();
} catch (Exception e) {
LOG.debug("Error in resource usage emulation! Message: ", e);
}
}
}
@Override
public void cleanup(Context context)
throws IOException, InterruptedException {
LOG.info("Starting the cleanup phase.");
for (RecordFactory factory : reduces) {
key.setSeed(r.nextLong());
while (factory.next(key, val)) {
// send the progress update (maybe make this a thread)
context.progress();
context.write(key, val);
key.setSeed(r.nextLong());
// match inline
try {
matcher.match();
} catch (Exception e) {
LOG.debug("Error in resource usage emulation! Message: ", e);
}
}
}
      // Check whether the matcher thread (started below) will get a chance to
      // run, i.e. whether a final sort&spill->merge phase will actually happen.
if (context.getNumReduceTasks() > 0
&& context.getCounter(TaskCounter.SPILLED_RECORDS).getValue() == 0) {
LOG.info("Boosting the map phase progress.");
// add the sort phase progress to the map phase and emulate
matcher.boost(0.33f);
matcher.match();
}
// start the matcher thread since the map phase ends here
matcher.start();
}
}
public static class LoadReducer
extends Reducer<GridmixKey,GridmixRecord,NullWritable,GridmixRecord> {
private final Random r = new Random();
private final GridmixRecord val = new GridmixRecord();
private double acc;
private double ratio;
private RecordFactory factory;
private ResourceUsageMatcherRunner matcher = null;
private StatusReporter reporter = null;
@Override
protected void setup(Context context)
throws IOException, InterruptedException {
if (!context.nextKey()
|| context.getCurrentKey().getType() != GridmixKey.REDUCE_SPEC) {
throw new IOException("Missing reduce spec");
}
long outBytes = 0L;
long outRecords = 0L;
long inRecords = 0L;
ResourceUsageMetrics metrics = new ResourceUsageMetrics();
for (GridmixRecord ignored : context.getValues()) {
final GridmixKey spec = context.getCurrentKey();
inRecords += spec.getReduceInputRecords();
outBytes += spec.getReduceOutputBytes();
outRecords += spec.getReduceOutputRecords();
if (spec.getReduceResourceUsageMetrics() != null) {
metrics = spec.getReduceResourceUsageMetrics();
}
}
if (0 == outRecords && inRecords > 0) {
LOG.info("Spec output bytes w/o records. Using input record count");
outRecords = inRecords;
}
// enable gridmix reduce output record for compression
Configuration conf = context.getConfiguration();
if (CompressionEmulationUtil.isCompressionEmulationEnabled(conf)
&& FileOutputFormat.getCompressOutput(context)) {
float compressionRatio =
CompressionEmulationUtil
.getJobOutputCompressionEmulationRatio(conf);
LOG.info("GridMix is configured to use a compression ratio of "
+ compressionRatio + " for the reduce output data.");
val.setCompressibility(true, compressionRatio);
        // Scale the output data size so that the size after compression
        // matches the expected output data size
outBytes /= compressionRatio;
}
factory =
new AvgRecordFactory(outBytes, outRecords,
context.getConfiguration(), 5*1024);
ratio = outRecords / (1.0 * inRecords);
acc = 0.0;
matcher = new ResourceUsageMatcherRunner(context, metrics);
// start the status reporter thread
reporter = new StatusReporter(context, matcher);
reporter.start();
}
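    // Like the map side, reduce() below emits roughly 'ratio' output records
    // per input record: 'acc' accumulates the fractional count and a record is
    // written each time it crosses 1.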
@Override
protected void reduce(GridmixKey key, Iterable<GridmixRecord> values,
Context context)
throws IOException, InterruptedException {
for (GridmixRecord ignored : values) {
acc += ratio;
while (acc >= 1.0 && factory.next(null, val)) {
context.write(NullWritable.get(), val);
acc -= 1.0;
// match inline
try {
matcher.match();
} catch (Exception e) {
LOG.debug("Error in resource usage emulation! Message: ", e);
}
}
}
}
@Override
protected void cleanup(Context context)
throws IOException, InterruptedException {
val.setSeed(r.nextLong());
while (factory.next(null, val)) {
context.write(NullWritable.get(), val);
val.setSeed(r.nextLong());
// match inline
try {
matcher.match();
} catch (Exception e) {
LOG.debug("Error in resource usage emulation! Message: ", e);
}
}
}
}
static class LoadRecordReader
extends RecordReader<NullWritable,GridmixRecord> {
private RecordFactory factory;
private final Random r = new Random();
private final GridmixRecord val = new GridmixRecord();
public LoadRecordReader() { }
@Override
public void initialize(InputSplit genericSplit, TaskAttemptContext ctxt)
throws IOException, InterruptedException {
final LoadSplit split = (LoadSplit)genericSplit;
final Configuration conf = ctxt.getConfiguration();
factory =
new ReadRecordFactory(split.getLength(), split.getInputRecords(),
new FileQueue(split, conf), conf);
}
@Override
public boolean nextKeyValue() throws IOException {
val.setSeed(r.nextLong());
return factory.next(null, val);
}
@Override
public float getProgress() throws IOException {
return factory.getProgress();
}
@Override
public NullWritable getCurrentKey() {
return NullWritable.get();
}
@Override
public GridmixRecord getCurrentValue() {
return val;
}
@Override
public void close() throws IOException {
factory.close();
}
}
static class LoadInputFormat
extends InputFormat<NullWritable,GridmixRecord> {
@Override
public List<InputSplit> getSplits(JobContext jobCtxt) throws IOException {
return pullDescription(jobCtxt);
}
@Override
public RecordReader<NullWritable,GridmixRecord> createRecordReader(
InputSplit split, final TaskAttemptContext taskContext)
throws IOException {
return new LoadRecordReader();
}
}
@Override
void buildSplits(FilePool inputDir) throws IOException {
long mapInputBytesTotal = 0L;
long mapOutputBytesTotal = 0L;
long mapOutputRecordsTotal = 0L;
final JobStory jobdesc = getJobDesc();
if (null == jobdesc) {
return;
}
final int maps = jobdesc.getNumberMaps();
final int reds = jobdesc.getNumberReduces();
for (int i = 0; i < maps; ++i) {
final TaskInfo info = jobdesc.getTaskInfo(TaskType.MAP, i);
mapInputBytesTotal += info.getInputBytes();
mapOutputBytesTotal += info.getOutputBytes();
mapOutputRecordsTotal += info.getOutputRecords();
}
final double[] reduceRecordRatio = new double[reds];
final double[] reduceByteRatio = new double[reds];
for (int i = 0; i < reds; ++i) {
final TaskInfo info = jobdesc.getTaskInfo(TaskType.REDUCE, i);
reduceByteRatio[i] = info.getInputBytes() / (1.0 * mapOutputBytesTotal);
reduceRecordRatio[i] =
info.getInputRecords() / (1.0 * mapOutputRecordsTotal);
}
final InputStriper striper = new InputStriper(inputDir, mapInputBytesTotal);
final List<InputSplit> splits = new ArrayList<InputSplit>();
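    // Reduce output specs are distributed to map splits round-robin: map split
    // i carries the specs for reduces i, i + maps, i + 2 * maps, ...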
for (int i = 0; i < maps; ++i) {
final int nSpec = reds / maps + ((reds % maps) > i ? 1 : 0);
final long[] specBytes = new long[nSpec];
final long[] specRecords = new long[nSpec];
final ResourceUsageMetrics[] metrics = new ResourceUsageMetrics[nSpec];
for (int j = 0; j < nSpec; ++j) {
final TaskInfo info =
jobdesc.getTaskInfo(TaskType.REDUCE, i + j * maps);
specBytes[j] = info.getOutputBytes();
specRecords[j] = info.getOutputRecords();
metrics[j] = info.getResourceUsageMetrics();
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("SPEC(%d) %d -> %d %d %d %d %d %d %d", id(), i,
i + j * maps, info.getOutputRecords(),
info.getOutputBytes(),
info.getResourceUsageMetrics().getCumulativeCpuUsage(),
info.getResourceUsageMetrics().getPhysicalMemoryUsage(),
info.getResourceUsageMetrics().getVirtualMemoryUsage(),
info.getResourceUsageMetrics().getHeapUsage()));
}
}
final TaskInfo info = jobdesc.getTaskInfo(TaskType.MAP, i);
long possiblyCompressedInputBytes = info.getInputBytes();
Configuration conf = job.getConfiguration();
long uncompressedInputBytes =
CompressionEmulationUtil.getUncompressedInputBytes(
possiblyCompressedInputBytes, conf);
splits.add(
new LoadSplit(striper.splitFor(inputDir, uncompressedInputBytes, 3),
maps, i, uncompressedInputBytes, info.getInputRecords(),
info.getOutputBytes(), info.getOutputRecords(),
reduceByteRatio, reduceRecordRatio, specBytes,
specRecords, info.getResourceUsageMetrics(),
metrics));
}
pushDescription(id(), splits);
}
}
| 24,212 | 35.465361 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/FileQueue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
/**
* Given a {@link org.apache.hadoop.mapreduce.lib.input.CombineFileSplit},
* circularly read through each input source.
*/
class FileQueue extends InputStream {
private int idx = -1;
private long curlen = -1L;
private InputStream input;
private final byte[] z = new byte[1];
private final Path[] paths;
private final long[] lengths;
private final long[] startoffset;
private final Configuration conf;
/**
* @param split Description of input sources.
* @param conf Used to resolve FileSystem instances.
*/
public FileQueue(CombineFileSplit split, Configuration conf)
throws IOException {
this.conf = conf;
paths = split.getPaths();
startoffset = split.getStartOffsets();
lengths = split.getLengths();
nextSource();
}
protected void nextSource() throws IOException {
if (0 == paths.length) {
return;
}
if (input != null) {
input.close();
}
idx = (idx + 1) % paths.length;
curlen = lengths[idx];
final Path file = paths[idx];
input =
CompressionEmulationUtil.getPossiblyDecompressedInputStream(file,
conf, startoffset[idx]);
}
@Override
public int read() throws IOException {
final int tmp = read(z);
return tmp == -1 ? -1 : (0xFF & z[0]);
}
@Override
public int read(byte[] b) throws IOException {
return read(b, 0, b.length);
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
int kvread = 0;
while (kvread < len) {
if (curlen <= 0) {
nextSource();
continue;
}
final int srcRead = (int) Math.min(len - kvread, curlen);
IOUtils.readFully(input, b, kvread, srcRead);
curlen -= srcRead;
kvread += srcRead;
}
return kvread;
}
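  // A minimal usage sketch (not part of the original source; 'split' and
  // 'conf' are assumed to be available): the queue behaves like an ordinary
  // InputStream and wraps around to the first source once the last one is
  // exhausted.
  //
  //   FileQueue in = new FileQueue(split, conf);
  //   byte[] buf = new byte[64 * 1024];
  //   in.read(buf);  // fills 'buf' by cycling through the split's sources
  //   in.close();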
@Override
public void close() throws IOException {
input.close();
}
}
| 2,972 | 27.586538 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobCreator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.rumen.JobStory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public enum JobCreator {
LOADJOB {
@Override
public GridmixJob createGridmixJob(
Configuration gridmixConf, long submissionMillis, JobStory jobdesc,
Path outRoot, UserGroupInformation ugi, int seq) throws IOException {
// Build configuration for this simulated job
Configuration conf = new Configuration(gridmixConf);
dce.configureDistCacheFiles(conf, jobdesc.getJobConf());
return new LoadJob(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
}
@Override
public boolean canEmulateDistCacheLoad() {
return true;
}
},
SLEEPJOB {
private String[] hosts;
@Override
public GridmixJob createGridmixJob(
Configuration conf, long submissionMillis, JobStory jobdesc, Path outRoot,
UserGroupInformation ugi, int seq) throws IOException {
int numLocations = conf.getInt(SLEEPJOB_RANDOM_LOCATIONS, 0);
if (numLocations < 0) numLocations = 0;
if (hosts == null) {
final JobClient client = new JobClient(new JobConf(conf));
ClusterStatus stat = client.getClusterStatus(true);
final int nTrackers = stat.getTaskTrackers();
final ArrayList<String> hostList = new ArrayList<String>(nTrackers);
final Pattern trackerPattern = Pattern.compile("tracker_([^:]*):.*");
final Matcher m = trackerPattern.matcher("");
for (String tracker : stat.getActiveTrackerNames()) {
m.reset(tracker);
if (!m.find()) {
continue;
}
final String name = m.group(1);
hostList.add(name);
}
hosts = hostList.toArray(new String[hostList.size()]);
}
return new SleepJob(conf, submissionMillis, jobdesc, outRoot, ugi, seq,
numLocations, hosts);
}
@Override
public boolean canEmulateDistCacheLoad() {
return false;
}
};
public static final String GRIDMIX_JOB_TYPE = "gridmix.job.type";
public static final String SLEEPJOB_RANDOM_LOCATIONS =
"gridmix.sleep.fake-locations";
/**
* Create Gridmix simulated job.
* @param conf configuration of simulated job
* @param submissionMillis At what time submission of this simulated job be
* done
* @param jobdesc JobStory obtained from trace
* @param outRoot gridmix output directory
* @param ugi UGI of job submitter of this simulated job
* @param seq job sequence number
* @return the created simulated job
* @throws IOException
*/
public abstract GridmixJob createGridmixJob(
final Configuration conf, long submissionMillis, final JobStory jobdesc,
Path outRoot, UserGroupInformation ugi, final int seq) throws IOException;
public static JobCreator getPolicy(
Configuration conf, JobCreator defaultPolicy) {
return conf.getEnum(GRIDMIX_JOB_TYPE, defaultPolicy);
}
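  // A hedged usage sketch (not in the original source): the simulated job type
  // could be picked from the configuration with LOADJOB as the fallback, e.g.
  //   conf.setEnum(GRIDMIX_JOB_TYPE, JobCreator.SLEEPJOB);
  //   JobCreator creator = JobCreator.getPolicy(conf, JobCreator.LOADJOB);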
/**
* @return true if gridmix simulated jobs of this job type can emulate
* distributed cache load
*/
abstract boolean canEmulateDistCacheLoad();
DistributedCacheEmulator dce;
/**
* This method is to be called before calling any other method in JobCreator
* except canEmulateDistCacheLoad(), especially if canEmulateDistCacheLoad()
* returns true for that job type.
* @param e Distributed Cache Emulator
*/
void setDistCacheEmulator(DistributedCacheEmulator e) {
this.dce = e;
}
}
| 4,861 | 34.75 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/EchoUserResolver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* Echos the UGI offered.
*/
public class EchoUserResolver implements UserResolver {
public static final Log LOG = LogFactory.getLog(Gridmix.class);
public EchoUserResolver() {
LOG.info(" Current user resolver is EchoUserResolver ");
}
public synchronized boolean setTargetUsers(URI userdesc, Configuration conf)
throws IOException {
return false;
}
public synchronized UserGroupInformation getTargetUgi(
UserGroupInformation ugi) {
return ugi;
}
/**
* {@inheritDoc}
* <br><br>
* Since {@link EchoUserResolver} simply returns the user's name passed as
* the argument, it doesn't need a target list of users.
*/
public boolean needsTargetUsersList() {
return false;
}
}
| 1,822 | 30.431034 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Progressive.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
/**
* Used to track progress of tasks.
*/
public interface Progressive {
public float getProgress();
}
| 956 | 37.28 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RecordFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.Closeable;
import java.io.IOException;
/**
* Interface for producing records as inputs and outputs to tasks.
*/
abstract class RecordFactory implements Closeable {
/**
* Transform the given record or perform some operation.
* @return true if the record should be emitted.
*/
public abstract boolean next(GridmixKey key, GridmixRecord val)
throws IOException;
/**
* Estimate of exhausted record capacity.
*/
public abstract float getProgress() throws IOException;
}
| 1,363 | 32.268293 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SleepJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.TaskStatus;
import org.apache.hadoop.mapred.gridmix.RandomAlgorithms.Selector;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.rumen.JobStory;
import org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo;
import org.apache.hadoop.tools.rumen.TaskAttemptInfo;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
public class SleepJob extends GridmixJob {
public static final Log LOG = LogFactory.getLog(SleepJob.class);
private static final ThreadLocal <Random> rand =
new ThreadLocal <Random> () {
@Override protected Random initialValue() {
return new Random();
}
};
public static final String SLEEPJOB_MAPTASK_ONLY="gridmix.sleep.maptask-only";
private final boolean mapTasksOnly;
private final int fakeLocations;
private final String[] hosts;
private final Selector selector;
/**
* Interval at which to report progress, in seconds.
*/
public static final String GRIDMIX_SLEEP_INTERVAL = "gridmix.sleep.interval";
public static final String GRIDMIX_SLEEP_MAX_MAP_TIME =
"gridmix.sleep.max-map-time";
public static final String GRIDMIX_SLEEP_MAX_REDUCE_TIME =
"gridmix.sleep.max-reduce-time";
private final long mapMaxSleepTime, reduceMaxSleepTime;
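  // A hedged configuration sketch (not in the original source): a caller could,
  // for example, run map-only sleep jobs and cap the emulated task runtimes via
  //   conf.setBoolean(SLEEPJOB_MAPTASK_ONLY, true);
  //   conf.setLong(GRIDMIX_SLEEP_MAX_MAP_TIME, 60000);     // cap each map at 60s
  //   conf.setLong(GRIDMIX_SLEEP_MAX_REDUCE_TIME, 120000); // cap each reduce at 120s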
public SleepJob(Configuration conf, long submissionMillis, JobStory jobdesc,
Path outRoot, UserGroupInformation ugi, int seq, int numLocations,
String[] hosts) throws IOException {
super(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
this.fakeLocations = numLocations;
this.hosts = hosts.clone();
this.selector = (fakeLocations > 0)? new Selector(hosts.length, (float) fakeLocations
/ hosts.length, rand.get()) : null;
this.mapTasksOnly = conf.getBoolean(SLEEPJOB_MAPTASK_ONLY, false);
mapMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_MAP_TIME, Long.MAX_VALUE);
reduceMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_REDUCE_TIME,
Long.MAX_VALUE);
}
@Override
protected boolean canEmulateCompression() {
return false;
}
@Override
public Job call()
throws IOException, InterruptedException, ClassNotFoundException {
ugi.doAs(
new PrivilegedExceptionAction<Job>() {
public Job run()
throws IOException, ClassNotFoundException, InterruptedException {
job.setMapperClass(SleepMapper.class);
job.setReducerClass(SleepReducer.class);
job.setNumReduceTasks((mapTasksOnly) ? 0 : jobdesc.getNumberReduces());
job.setMapOutputKeyClass(GridmixKey.class);
job.setMapOutputValueClass(NullWritable.class);
job.setSortComparatorClass(GridmixKey.Comparator.class);
job.setGroupingComparatorClass(SpecGroupingComparator.class);
job.setInputFormatClass(SleepInputFormat.class);
job.setOutputFormatClass(NullOutputFormat.class);
job.setPartitionerClass(DraftPartitioner.class);
job.setJarByClass(SleepJob.class);
job.getConfiguration().setBoolean(Job.USED_GENERIC_PARSER, true);
job.submit();
return job;
}
});
return job;
}
public static class SleepMapper
extends Mapper<LongWritable, LongWritable, GridmixKey, NullWritable> {
@Override
public void map(LongWritable key, LongWritable value, Context context)
throws IOException, InterruptedException {
context.setStatus("Sleeping... " + value.get() + " ms left");
long now = System.currentTimeMillis();
if (now < key.get()) {
TimeUnit.MILLISECONDS.sleep(key.get() - now);
}
}
@Override
public void cleanup(Context context)
throws IOException, InterruptedException {
final int nReds = context.getNumReduceTasks();
if (nReds > 0) {
final SleepSplit split = (SleepSplit) context.getInputSplit();
int id = split.getId();
final int nMaps = split.getNumMaps();
//This is a hack to pass the sleep duration via Gridmix key
//TODO: We need to come up with better solution for this.
final GridmixKey key = new GridmixKey(GridmixKey.REDUCE_SPEC, 0, 0L);
for (int i = id, idx = 0; i < nReds; i += nMaps) {
key.setPartition(i);
key.setReduceOutputBytes(split.getReduceDurations(idx++));
id += nReds;
context.write(key, NullWritable.get());
}
}
}
}
public static class SleepReducer
extends Reducer<GridmixKey, NullWritable, NullWritable, NullWritable> {
private long duration = 0L;
@Override
protected void setup(Context context)
throws IOException, InterruptedException {
if (!context.nextKey() ||
context.getCurrentKey().getType() != GridmixKey.REDUCE_SPEC) {
throw new IOException("Missing reduce spec");
}
for (NullWritable ignored : context.getValues()) {
final GridmixKey spec = context.getCurrentKey();
duration += spec.getReduceOutputBytes();
}
long sleepInterval =
context.getConfiguration().getLong(GRIDMIX_SLEEP_INTERVAL, 5);
final long RINTERVAL =
TimeUnit.MILLISECONDS.convert(sleepInterval, TimeUnit.SECONDS);
      // Sleep in bounded intervals and recompute the elapsed time from the
      // wall clock so that deviation from the expected sleep time does not
      // accumulate.
long start = System.currentTimeMillis();
long slept = 0L;
long sleep = 0L;
while (slept < duration) {
final long rem = duration - slept;
sleep = Math.min(rem, RINTERVAL);
context.setStatus("Sleeping... " + rem + " ms left");
TimeUnit.MILLISECONDS.sleep(sleep);
slept = System.currentTimeMillis() - start;
}
}
@Override
protected void cleanup(Context context)
throws IOException, InterruptedException {
final String msg = "Slept for " + duration;
LOG.info(msg);
context.setStatus(msg);
}
}
public static class SleepInputFormat
extends InputFormat<LongWritable, LongWritable> {
@Override
public List<InputSplit> getSplits(JobContext jobCtxt) throws IOException {
return pullDescription(jobCtxt);
}
@Override
public RecordReader<LongWritable, LongWritable> createRecordReader(
InputSplit split, final TaskAttemptContext context)
throws IOException, InterruptedException {
final long duration = split.getLength();
long sleepInterval =
context.getConfiguration().getLong(GRIDMIX_SLEEP_INTERVAL, 5);
final long RINTERVAL =
TimeUnit.MILLISECONDS.convert(sleepInterval, TimeUnit.SECONDS);
if (RINTERVAL <= 0) {
throw new IOException(
"Invalid " + GRIDMIX_SLEEP_INTERVAL + ": " + RINTERVAL);
}
return new RecordReader<LongWritable, LongWritable>() {
long start = -1;
long slept = 0L;
long sleep = 0L;
final LongWritable key = new LongWritable();
final LongWritable val = new LongWritable();
@Override
public boolean nextKeyValue() throws IOException {
if (start == -1) {
start = System.currentTimeMillis();
}
slept += sleep;
sleep = Math.min(duration - slept, RINTERVAL);
key.set(slept + sleep + start);
val.set(duration - slept);
return slept < duration;
}
@Override
public float getProgress() throws IOException {
return slept / ((float) duration);
}
@Override
public LongWritable getCurrentKey() {
return key;
}
@Override
public LongWritable getCurrentValue() {
return val;
}
@Override
public void close() throws IOException {
final String msg = "Slept for " + duration;
LOG.info(msg);
}
public void initialize(InputSplit split, TaskAttemptContext ctxt) {
}
};
}
}
public static class SleepSplit extends InputSplit implements Writable {
private int id;
private int nSpec;
private int nMaps;
private long sleepDuration;
private long[] reduceDurations = new long[0];
private String[] locations = new String[0];
public SleepSplit() {
}
public SleepSplit(
int id, long sleepDuration, long[] reduceDurations, int nMaps,
String[] locations) {
this.id = id;
this.sleepDuration = sleepDuration;
nSpec = reduceDurations.length;
this.reduceDurations = reduceDurations.clone();
this.nMaps = nMaps;
this.locations = locations.clone();
}
@Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, id);
WritableUtils.writeVLong(out, sleepDuration);
WritableUtils.writeVInt(out, nMaps);
WritableUtils.writeVInt(out, nSpec);
for (int i = 0; i < nSpec; ++i) {
WritableUtils.writeVLong(out, reduceDurations[i]);
}
WritableUtils.writeVInt(out, locations.length);
for (int i = 0; i < locations.length; ++i) {
Text.writeString(out, locations[i]);
}
}
@Override
public void readFields(DataInput in) throws IOException {
id = WritableUtils.readVInt(in);
sleepDuration = WritableUtils.readVLong(in);
nMaps = WritableUtils.readVInt(in);
nSpec = WritableUtils.readVInt(in);
if (reduceDurations.length < nSpec) {
reduceDurations = new long[nSpec];
}
for (int i = 0; i < nSpec; ++i) {
reduceDurations[i] = WritableUtils.readVLong(in);
}
final int nLoc = WritableUtils.readVInt(in);
if (nLoc != locations.length) {
locations = new String[nLoc];
}
for (int i = 0; i < nLoc; ++i) {
locations[i] = Text.readString(in);
}
}
@Override
public long getLength() {
return sleepDuration;
}
public int getId() {
return id;
}
public int getNumMaps() {
return nMaps;
}
public long getReduceDurations(int i) {
return reduceDurations[i];
}
@Override
public String[] getLocations() {
return locations.clone();
}
}
private TaskAttemptInfo getSuccessfulAttemptInfo(TaskType type, int task) {
TaskAttemptInfo ret;
for (int i = 0; true; ++i) {
      // Rumen is expected to synthesize a successful attempt if one is
      // missing; otherwise this loop would never terminate.
ret = jobdesc.getTaskAttemptInfo(type, task, i);
if (ret.getRunState() == TaskStatus.State.SUCCEEDED) {
break;
}
}
    if (ret.getRunState() != TaskStatus.State.SUCCEEDED) {
      LOG.warn("No successful attempts for tasktype " + type + " task " + task);
}
return ret;
}
@Override
void buildSplits(FilePool inputDir) throws IOException {
final List<InputSplit> splits = new ArrayList<InputSplit>();
final int reds = (mapTasksOnly) ? 0 : jobdesc.getNumberReduces();
final int maps = jobdesc.getNumberMaps();
for (int i = 0; i < maps; ++i) {
final int nSpec = reds / maps + ((reds % maps) > i ? 1 : 0);
final long[] redDurations = new long[nSpec];
for (int j = 0; j < nSpec; ++j) {
final ReduceTaskAttemptInfo info =
(ReduceTaskAttemptInfo) getSuccessfulAttemptInfo(TaskType.REDUCE,
i + j * maps);
// Include only merge/reduce time
redDurations[j] = Math.min(reduceMaxSleepTime, info.getMergeRuntime()
+ info.getReduceRuntime());
if (LOG.isDebugEnabled()) {
LOG.debug(
String.format(
"SPEC(%d) %d -> %d %d/%d", id(), i, i + j * maps, redDurations[j],
info.getRuntime()));
}
}
final TaskAttemptInfo info = getSuccessfulAttemptInfo(TaskType.MAP, i);
ArrayList<String> locations = new ArrayList<String>(fakeLocations);
if (fakeLocations > 0) {
selector.reset();
}
for (int k=0; k<fakeLocations; ++k) {
int index = selector.next();
if (index < 0) break;
locations.add(hosts[index]);
}
splits.add(new SleepSplit(i,
Math.min(info.getRuntime(), mapMaxSleepTime), redDurations, maps,
          locations.toArray(new String[locations.size()])));
    }
pushDescription(id(), splits);
}
}
| 14,256 | 33.604369 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/TotalHeapUsageEmulatorPlugin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix.emulators.resourceusage;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.gridmix.Progressive;
import org.apache.hadoop.tools.rumen.ResourceUsageMetrics;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
/**
* <p>A {@link ResourceUsageEmulatorPlugin} that emulates the total heap
* usage by loading the JVM heap memory. Adding smaller chunks of data to the
* heap will essentially use up some heap space thus forcing the JVM to expand
* its heap and thus resulting into increase in the heap usage.</p>
*
* <p>{@link TotalHeapUsageEmulatorPlugin} emulates the heap usage in steps.
* The frequency of emulation can be configured via
* {@link #HEAP_EMULATION_PROGRESS_INTERVAL}.
* Heap usage values are matched via emulation only at specific interval
* boundaries.
* </p>
*
* {@link TotalHeapUsageEmulatorPlugin} is a wrapper program for managing
* the heap usage emulation feature. It internally uses an emulation algorithm
* (called as core and described using {@link HeapUsageEmulatorCore}) for
* performing the actual emulation. Multiple calls to this core engine should
* use up some amount of heap.
*/
public class TotalHeapUsageEmulatorPlugin
implements ResourceUsageEmulatorPlugin {
// Configuration parameters
// the core engine to emulate heap usage
protected HeapUsageEmulatorCore emulatorCore;
// the progress bar
private Progressive progress;
// decides if this plugin can emulate heap usage or not
private boolean enabled = true;
// the progress boundaries/interval where emulation should be done
private float emulationInterval;
// target heap usage to emulate
private long targetHeapUsageInMB = 0;
/**
* The frequency (based on task progress) with which memory-emulation code is
* run. If the value is set to 0.1 then the emulation will happen at 10% of
* the task's progress. The default value of this parameter is
* {@link #DEFAULT_EMULATION_PROGRESS_INTERVAL}.
*/
public static final String HEAP_EMULATION_PROGRESS_INTERVAL =
"gridmix.emulators.resource-usage.heap.emulation-interval";
// Default value for emulation interval
private static final float DEFAULT_EMULATION_PROGRESS_INTERVAL = 0.1F; // 10 %
private float prevEmulationProgress = 0F;
/**
* The minimum buffer reserved for other non-emulation activities.
*/
public static final String MIN_HEAP_FREE_RATIO =
"gridmix.emulators.resource-usage.heap.min-free-ratio";
private float minFreeHeapRatio;
private static final float DEFAULT_MIN_FREE_HEAP_RATIO = 0.3F;
/**
* Determines the unit increase per call to the core engine's load API. This
* is expressed as a percentage of the difference between the expected total
* heap usage and the current usage.
*/
public static final String HEAP_LOAD_RATIO =
"gridmix.emulators.resource-usage.heap.load-ratio";
private float heapLoadRatio;
private static final float DEFAULT_HEAP_LOAD_RATIO = 0.1F;
public static final int ONE_MB = 1024 * 1024;
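  // A hedged configuration sketch (not in the original source): the emulation
  // step size and buffers could be tuned via, e.g.
  //   conf.setFloat(HEAP_EMULATION_PROGRESS_INTERVAL, 0.2F); // emulate every 20% of progress
  //   conf.setFloat(MIN_HEAP_FREE_RATIO, 0.3F);              // keep ~30% of the expected heap free
  //   conf.setFloat(HEAP_LOAD_RATIO, 0.1F);                  // load 10% of the remaining gap per step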
/**
* Defines the core heap usage emulation algorithm. This engine is expected
* to perform certain memory intensive operations to consume some
* amount of heap. {@link #load(long)} should load the current heap and
* increase the heap usage by the specified value. This core engine can be
* initialized using the {@link #initialize(ResourceCalculatorPlugin, long)}
* API to suit the underlying hardware better.
*/
public interface HeapUsageEmulatorCore {
/**
* Performs some memory intensive operations to use up some heap.
*/
public void load(long sizeInMB);
/**
* Initialize the core.
*/
public void initialize(ResourceCalculatorPlugin monitor,
long totalHeapUsageInMB);
/**
* Reset the resource usage
*/
public void reset();
}
/**
* This is the core engine to emulate the heap usage. The only responsibility
* of this class is to perform certain memory intensive operations to make
* sure that some desired value of heap is used.
*/
public static class DefaultHeapUsageEmulator
implements HeapUsageEmulatorCore {
// store the unit loads in a list
protected static final ArrayList<Object> heapSpace =
new ArrayList<Object>();
/**
     * Increase the heap usage of the current process by the given amount.
* This is done by creating objects each of size 1MB.
*/
public void load(long sizeInMB) {
for (long i = 0; i < sizeInMB; ++i) {
        // Add another 1 MB byte array to the heap
heapSpace.add((Object)new byte[ONE_MB]);
}
}
/**
* This will initialize the core and check if the core can emulate the
* desired target on the underlying hardware.
*/
public void initialize(ResourceCalculatorPlugin monitor,
long totalHeapUsageInMB) {
long maxPhysicalMemoryInMB = monitor.getPhysicalMemorySize() / ONE_MB ;
      if (maxPhysicalMemoryInMB < totalHeapUsageInMB) {
        throw new RuntimeException("Total heap that can be used is "
                                   + maxPhysicalMemoryInMB
                                   + " MB while the emulator is configured to emulate a total of "
                                   + totalHeapUsageInMB + " MB");
}
}
/**
* Clear references to all the GridMix-allocated special objects so that
* heap usage is reduced.
*/
@Override
public void reset() {
heapSpace.clear();
}
}
public TotalHeapUsageEmulatorPlugin() {
this(new DefaultHeapUsageEmulator());
}
/**
* For testing.
*/
public TotalHeapUsageEmulatorPlugin(HeapUsageEmulatorCore core) {
emulatorCore = core;
}
protected long getTotalHeapUsageInMB() {
return Runtime.getRuntime().totalMemory() / ONE_MB;
}
protected long getMaxHeapUsageInMB() {
return Runtime.getRuntime().maxMemory() / ONE_MB;
}
@Override
public float getProgress() {
return enabled
? Math.min(1f, ((float)getTotalHeapUsageInMB())/targetHeapUsageInMB)
: 1.0f;
}
@Override
public void emulate() throws IOException, InterruptedException {
if (enabled) {
float currentProgress = progress.getProgress();
if (prevEmulationProgress < currentProgress
&& ((currentProgress - prevEmulationProgress) >= emulationInterval
|| currentProgress == 1)) {
long maxHeapSizeInMB = getMaxHeapUsageInMB();
long committedHeapSizeInMB = getTotalHeapUsageInMB();
// Increase committed heap usage, if needed
// Using a linear weighing function for computing the expected usage
long expectedHeapUsageInMB =
Math.min(maxHeapSizeInMB,
(long) (targetHeapUsageInMB * currentProgress));
if (expectedHeapUsageInMB < maxHeapSizeInMB
&& committedHeapSizeInMB < expectedHeapUsageInMB) {
long bufferInMB = (long)(minFreeHeapRatio * expectedHeapUsageInMB);
long currentDifferenceInMB =
expectedHeapUsageInMB - committedHeapSizeInMB;
long currentIncrementLoadSizeInMB =
(long)(currentDifferenceInMB * heapLoadRatio);
// Make sure that at least 1 MB is incremented.
currentIncrementLoadSizeInMB =
Math.max(1, currentIncrementLoadSizeInMB);
while (committedHeapSizeInMB + bufferInMB < expectedHeapUsageInMB) {
// add blocks in order of X% of the difference, X = 10% by default
emulatorCore.load(currentIncrementLoadSizeInMB);
committedHeapSizeInMB = getTotalHeapUsageInMB();
}
}
// store the emulation progress boundary
prevEmulationProgress = currentProgress;
}
// reset the core so that the garbage is reclaimed
emulatorCore.reset();
}
}
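  // Worked example for emulate() (illustrative numbers only, assuming the max
  // heap is large enough): with targetHeapUsageInMB = 400, progress = 0.5,
  // committed heap = 100 MB, minFreeHeapRatio = 0.3 and heapLoadRatio = 0.1,
  // the expected usage is 200 MB, the reserved buffer is 60 MB and each load()
  // call adds a fixed 10 MB (10% of the initial 100 MB gap) until the committed
  // heap plus the 60 MB buffer reaches 200 MB.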
@Override
public void initialize(Configuration conf, ResourceUsageMetrics metrics,
ResourceCalculatorPlugin monitor,
Progressive progress) {
this.progress = progress;
// get the target heap usage
targetHeapUsageInMB = metrics.getHeapUsage() / ONE_MB;
if (targetHeapUsageInMB <= 0 ) {
enabled = false;
return;
} else {
// calibrate the core heap-usage utility
emulatorCore.initialize(monitor, targetHeapUsageInMB);
enabled = true;
}
emulationInterval =
conf.getFloat(HEAP_EMULATION_PROGRESS_INTERVAL,
DEFAULT_EMULATION_PROGRESS_INTERVAL);
minFreeHeapRatio = conf.getFloat(MIN_HEAP_FREE_RATIO,
DEFAULT_MIN_FREE_HEAP_RATIO);
heapLoadRatio = conf.getFloat(HEAP_LOAD_RATIO, DEFAULT_HEAP_LOAD_RATIO);
prevEmulationProgress = 0;
}
}
| 9,796 | 35.692884 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageEmulatorPlugin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix.emulators.resourceusage;
import java.io.IOException;
import org.apache.hadoop.mapred.gridmix.Progressive;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import org.apache.hadoop.tools.rumen.ResourceUsageMetrics;
import org.apache.hadoop.conf.Configuration;
/**
* <p>Each resource to be emulated should have a corresponding implementation
* class that implements {@link ResourceUsageEmulatorPlugin}.</p>
* <br><br>
* {@link ResourceUsageEmulatorPlugin} will be configured using the
* {@link #initialize(Configuration, ResourceUsageMetrics,
* ResourceCalculatorPlugin, Progressive)} call.
* Every
* {@link ResourceUsageEmulatorPlugin} is also configured with a feedback module
* i.e a {@link ResourceCalculatorPlugin}, to monitor the current resource
* usage. {@link ResourceUsageMetrics} decides the final resource usage value to
* emulate. {@link Progressive} keeps track of the task's progress.
*
* <br><br>
*
 * For configuring GridMix to load and use a resource usage emulator,
* see {@link ResourceUsageMatcher}.
*/
public interface ResourceUsageEmulatorPlugin extends Progressive {
/**
* Initialize the plugin. This might involve
* - initializing the variables
* - calibrating the plugin
*/
void initialize(Configuration conf, ResourceUsageMetrics metrics,
ResourceCalculatorPlugin monitor,
Progressive progress);
/**
* Emulate the resource usage to match the usage target. The plugin can use
* the given {@link ResourceCalculatorPlugin} to query for the current
* resource usage.
* @throws IOException
* @throws InterruptedException
*/
void emulate() throws IOException, InterruptedException;
}
| 2,595 | 39.5625 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageMatcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix.emulators.resourceusage;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.gridmix.Progressive;
import org.apache.hadoop.tools.rumen.ResourceUsageMetrics;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
/**
* <p>This is the driver class for managing all the resource usage emulators.
* {@link ResourceUsageMatcher} expects a comma separated list of
* {@link ResourceUsageEmulatorPlugin} implementations specified using
* {@link #RESOURCE_USAGE_EMULATION_PLUGINS} as the configuration parameter.</p>
*
* <p>Note that the order in which the emulators are invoked is same as the
* order in which they are configured.
*/
public class ResourceUsageMatcher implements Progressive {
/**
* Configuration key to set resource usage emulators.
*/
public static final String RESOURCE_USAGE_EMULATION_PLUGINS =
"gridmix.emulators.resource-usage.plugins";
private List<ResourceUsageEmulatorPlugin> emulationPlugins =
new ArrayList<ResourceUsageEmulatorPlugin>();
/**
* Configure the {@link ResourceUsageMatcher} to load the configured plugins
* and initialize them.
*/
@SuppressWarnings("unchecked")
public void configure(Configuration conf, ResourceCalculatorPlugin monitor,
ResourceUsageMetrics metrics, Progressive progress) {
Class[] plugins = conf.getClasses(RESOURCE_USAGE_EMULATION_PLUGINS);
if (plugins == null) {
System.out.println("No resource usage emulator plugins configured.");
} else {
for (Class clazz : plugins) {
if (clazz != null) {
if (ResourceUsageEmulatorPlugin.class.isAssignableFrom(clazz)) {
ResourceUsageEmulatorPlugin plugin =
(ResourceUsageEmulatorPlugin) ReflectionUtils.newInstance(clazz,
conf);
emulationPlugins.add(plugin);
} else {
throw new RuntimeException("Misconfigured resource usage plugins. "
+ "Class " + clazz.getClass().getName() + " is not a resource "
+ "usage plugin as it does not extend "
+ ResourceUsageEmulatorPlugin.class.getName());
}
}
}
}
// initialize the emulators once all the configured emulator plugins are
// loaded
for (ResourceUsageEmulatorPlugin emulator : emulationPlugins) {
emulator.initialize(conf, metrics, monitor, progress);
}
}
public void matchResourceUsage() throws IOException, InterruptedException {
for (ResourceUsageEmulatorPlugin emulator : emulationPlugins) {
// match the resource usage
emulator.emulate();
}
}
/**
* Returns the average progress.
*/
@Override
public float getProgress() {
if (emulationPlugins.size() > 0) {
// return the average progress
float progress = 0f;
for (ResourceUsageEmulatorPlugin emulator : emulationPlugins) {
// consider weighted progress of each emulator
progress += emulator.getProgress();
}
return progress / emulationPlugins.size();
}
// if no emulators are configured then return 1
return 1f;
}
}
| 4,189 | 36.747748 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/CumulativeCpuUsageEmulatorPlugin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix.emulators.resourceusage;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.gridmix.Progressive;
import org.apache.hadoop.tools.rumen.ResourceUsageMetrics;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
/**
* <p>A {@link ResourceUsageEmulatorPlugin} that emulates the cumulative CPU
* usage by performing certain CPU intensive operations. Performing such CPU
* intensive operations essentially uses up some CPU. Every
* {@link ResourceUsageEmulatorPlugin} is configured with a feedback module i.e
* a {@link ResourceCalculatorPlugin}, to monitor the resource usage.</p>
*
* <p>{@link CumulativeCpuUsageEmulatorPlugin} emulates the CPU usage in steps.
* The frequency of emulation can be configured via
* {@link #CPU_EMULATION_PROGRESS_INTERVAL}.
* CPU usage values are matched via emulation only on the interval boundaries.
* </p>
*
* {@link CumulativeCpuUsageEmulatorPlugin} is a wrapper program for managing
* the CPU usage emulation feature. It internally uses an emulation algorithm
* (called as core and described using {@link CpuUsageEmulatorCore}) for
* performing the actual emulation. Multiple calls to this core engine should
* use up some amount of CPU.<br>
*
* <p>{@link CumulativeCpuUsageEmulatorPlugin} provides a calibration feature
* via {@link #initialize(Configuration, ResourceUsageMetrics,
* ResourceCalculatorPlugin, Progressive)} to calibrate
* the plugin and its core for the underlying hardware. As a result of
* calibration, every call to the emulation engine's core should roughly use up
* 1% of the total usage value to be emulated. This makes sure that the
* underlying hardware is profiled before use and that the plugin doesn't
* accidently overuse the CPU. With 1% as the unit emulation target value for
* the core engine, there will be roughly 100 calls to the engine resulting in
* roughly 100 calls to the feedback (resource usage monitor) module.
* Excessive usage of the feedback module is discouraged as
* it might result into excess CPU usage resulting into no real CPU emulation.
* </p>
*/
public class CumulativeCpuUsageEmulatorPlugin
implements ResourceUsageEmulatorPlugin {
protected CpuUsageEmulatorCore emulatorCore;
private ResourceCalculatorPlugin monitor;
private Progressive progress;
private boolean enabled = true;
private float emulationInterval; // emulation interval
private long targetCpuUsage = 0;
private float lastSeenProgress = 0;
private long lastSeenCpuUsage = 0;
// Configuration parameters
public static final String CPU_EMULATION_PROGRESS_INTERVAL =
"gridmix.emulators.resource-usage.cpu.emulation-interval";
  private static final float DEFAULT_EMULATION_FREQUENCY = 0.1F; // every 10% of progress
/**
* This is the core CPU usage emulation algorithm. This is the core engine
* which actually performs some CPU intensive operations to consume some
* amount of CPU. Multiple calls of {@link #compute()} should help the
* plugin emulate the desired level of CPU usage. This core engine can be
* calibrated using the {@link #calibrate(ResourceCalculatorPlugin, long)}
* API to suit the underlying hardware better. It also can be used to optimize
* the emulation cycle.
*/
public interface CpuUsageEmulatorCore {
/**
* Performs some computation to use up some CPU.
*/
public void compute();
/**
* Allows the core to calibrate itself.
*/
public void calibrate(ResourceCalculatorPlugin monitor,
long totalCpuUsage);
}
/**
* This is the core engine to emulate the CPU usage. The only responsibility
* of this class is to perform certain math intensive operations to make sure
* that some desired value of CPU is used.
*/
public static class DefaultCpuUsageEmulator implements CpuUsageEmulatorCore {
// number of times to loop for performing the basic unit computation
private int numIterations;
private final Random random;
/**
* This is to fool the JVM and make it think that we need the value
* stored in the unit computation i.e {@link #compute()}. This will prevent
* the JVM from optimizing the code.
*/
protected double returnValue;
/**
* Initialized the {@link DefaultCpuUsageEmulator} with default values.
* Note that the {@link DefaultCpuUsageEmulator} should be calibrated
* (see {@link #calibrate(ResourceCalculatorPlugin, long)}) when initialized
* using this constructor.
*/
public DefaultCpuUsageEmulator() {
this(-1);
}
DefaultCpuUsageEmulator(int numIterations) {
this.numIterations = numIterations;
random = new Random();
}
/**
     * This will consume some desired level of CPU. This API will try to use up
     * 'X' percent of the target cumulative CPU usage per call; after
     * calibration, X is roughly 1%.
*/
public void compute() {
for (int i = 0; i < numIterations; ++i) {
performUnitComputation();
}
}
// Perform unit computation. The complete CPU emulation will be based on
// multiple invocations to this unit computation module.
protected void performUnitComputation() {
      //TODO Can this be made configurable too? Users/emulators should be able
      // to pick and choose which math operations to run.
      // Example :
      //   BASIC     : ADD, SUB, MUL, DIV
      //   ADVANCED  : SQRT, SIN, COSINE, ...
      //   COMPOSITE : (BASIC/ADVANCED)*
// Also define input generator. For now we can use the random number
// generator. Later this can be changed to accept multiple sources.
int randomData = random.nextInt();
int randomDataCube = randomData * randomData * randomData;
double randomDataCubeRoot = Math.cbrt(randomData);
returnValue = Math.log(Math.tan(randomDataCubeRoot
* Math.exp(randomDataCube))
* Math.sqrt(randomData));
}
/**
* This will calibrate the algorithm such that a single invocation of
* {@link #compute()} emulates roughly 1% of the total desired resource
* usage value.
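     *
     * <p>For example (hypothetical numbers): if the reference loop measures
     * roughly 0.5 ms of CPU per unit computation and the total desired CPU
     * usage is 10,000 ms, then 1% of the target is 100 ms and the calibrated
     * iteration count becomes max(1, 100 / 0.5) = 200 unit computations per
     * {@link #compute()} call.</p>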
*/
public void calibrate(ResourceCalculatorPlugin monitor,
long totalCpuUsage) {
long initTime = monitor.getCumulativeCpuTime();
long defaultLoopSize = 0;
long finalTime = initTime;
//TODO Make this configurable
while (finalTime - initTime < 100) { // 100 ms
++defaultLoopSize;
performUnitComputation(); //perform unit computation
finalTime = monitor.getCumulativeCpuTime();
}
long referenceRuntime = finalTime - initTime;
// time for one loop = (final-time - init-time) / total-loops
float timePerLoop = ((float)referenceRuntime) / defaultLoopSize;
// compute the 1% of the total CPU usage desired
//TODO Make this configurable
long onePercent = totalCpuUsage / 100;
// num-iterations for 1% = (total-desired-usage / 100) / time-for-one-loop
numIterations = Math.max(1, (int)((float)onePercent/timePerLoop));
System.out.println("Calibration done. Basic computation runtime : "
+ timePerLoop + " milliseconds. Optimal number of iterations (1%): "
+ numIterations);
}
}
public CumulativeCpuUsageEmulatorPlugin() {
this(new DefaultCpuUsageEmulator());
}
/**
* For testing.
*/
public CumulativeCpuUsageEmulatorPlugin(CpuUsageEmulatorCore core) {
emulatorCore = core;
}
  // Note that this weighting function uses only the current progress. In the
  // future, this might depend on progress, emulation-interval and the expected
  // target.
private float getWeightForProgressInterval(float progress) {
    // We want a rapidly growing function that gives little weight at low
    // progress boundaries but full weight (exact emulation) near a progress
    // value of 1. The current function is weight = progress^4, which looks
    // like this:
// progress weight
// 0.1 0.0001
// 0.2 0.0016
// 0.3 0.0081
// 0.4 0.0256
// 0.5 0.0625
// 0.6 0.1296
// 0.7 0.2401
// 0.8 0.4096
// 0.9 0.6561
// 1.0 1.000
return progress * progress * progress * progress;
}
private synchronized long getCurrentCPUUsage() {
return monitor.getCumulativeCpuTime();
}
@Override
public float getProgress() {
return enabled
? Math.min(1f, ((float)getCurrentCPUUsage())/targetCpuUsage)
: 1.0f;
}
@Override
//TODO Multi-threading for speedup?
public void emulate() throws IOException, InterruptedException {
if (enabled) {
float currentProgress = progress.getProgress();
if (lastSeenProgress < currentProgress
&& ((currentProgress - lastSeenProgress) >= emulationInterval
|| currentProgress == 1)) {
// Estimate the final cpu usage
//
// Consider the following
// Cl/Cc/Cp : Last/Current/Projected Cpu usage
// Pl/Pc/Pp : Last/Current/Projected progress
// Then
// (Cp-Cc)/(Pp-Pc) = (Cc-Cl)/(Pc-Pl)
// Solving this for Cp, we get
      //   Cp = Cc + (1-Pc)*(Cc-Cl)/(Pc-Pl)
      // Note that (Cc-Cl)/(Pc-Pl) is termed the 'rate' in the following
      // section.
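      // Worked example with hypothetical numbers: if Cl = 200ms at Pl = 0.2
      // and Cc = 500ms at Pc = 0.4, then rate = (500-200)/(0.4-0.2) = 1500ms
      // per unit progress and Cp = 500 + (1-0.4)*1500 = 1400ms. The busy loop
      // below only runs if this projection falls short of the target usage.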
long currentCpuUsage = getCurrentCPUUsage();
// estimate the cpu usage rate
float rate = (currentCpuUsage - lastSeenCpuUsage)
/ (currentProgress - lastSeenProgress);
long projectedUsage =
currentCpuUsage + (long)((1 - currentProgress) * rate);
if (projectedUsage < targetCpuUsage) {
// determine the correction factor between the current usage and the
// expected usage and add some weight to the target
long currentWeighedTarget =
(long)(targetCpuUsage
* getWeightForProgressInterval(currentProgress));
while (getCurrentCPUUsage() < currentWeighedTarget) {
emulatorCore.compute();
// sleep for 100ms
try {
Thread.sleep(100);
          } catch (InterruptedException ie) {
            // restore the interrupt status before bailing out
            Thread.currentThread().interrupt();
            String message =
              "CumulativeCpuUsageEmulatorPlugin got interrupted. Exiting.";
            throw new RuntimeException(message, ie);
}
}
}
// set the last seen progress
lastSeenProgress = progress.getProgress();
// set the last seen usage
lastSeenCpuUsage = getCurrentCPUUsage();
}
}
}
@Override
public void initialize(Configuration conf, ResourceUsageMetrics metrics,
ResourceCalculatorPlugin monitor,
Progressive progress) {
this.monitor = monitor;
this.progress = progress;
// get the target CPU usage
targetCpuUsage = metrics.getCumulativeCpuUsage();
    if (targetCpuUsage <= 0) {
enabled = false;
return;
} else {
enabled = true;
}
emulationInterval = conf.getFloat(CPU_EMULATION_PROGRESS_INTERVAL,
DEFAULT_EMULATION_FREQUENCY);
// calibrate the core cpu-usage utility
emulatorCore.calibrate(monitor, targetCpuUsage);
// initialize the states
lastSeenProgress = 0;
lastSeenCpuUsage = 0;
}
}
| 12,595 | 37.638037 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithAcls.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.junit.Assert.*;
import java.io.IOException;
import java.net.URI;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ToolRunner;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Tests distcp in combination with HDFS ACLs.
*/
public class TestDistCpWithAcls {
private static MiniDFSCluster cluster;
private static Configuration conf;
private static FileSystem fs;
@BeforeClass
public static void init() throws Exception {
initCluster(true, true);
// Create this directory structure:
// /src
// /dir1
// /subdir1
// /dir2
// /dir2/file2
// /dir2/file3
// /dir3sticky
// /file1
fs.mkdirs(new Path("/src/dir1/subdir1"));
fs.mkdirs(new Path("/src/dir2"));
fs.create(new Path("/src/dir2/file2")).close();
fs.create(new Path("/src/dir2/file3")).close();
fs.mkdirs(new Path("/src/dir3sticky"));
fs.create(new Path("/src/file1")).close();
// Set a mix of ACLs and plain permissions throughout the tree.
fs.modifyAclEntries(new Path("/src/dir1"), Arrays.asList(
aclEntry(DEFAULT, USER, "bruce", ALL)));
fs.modifyAclEntries(new Path("/src/dir2/file2"), Arrays.asList(
aclEntry(ACCESS, GROUP, "sales", NONE)));
fs.setPermission(new Path("/src/dir2/file3"),
new FsPermission((short)0660));
fs.modifyAclEntries(new Path("/src/file1"), Arrays.asList(
aclEntry(ACCESS, USER, "diana", READ)));
fs.setPermission(new Path("/src/dir3sticky"),
new FsPermission((short)01777));
}
@AfterClass
public static void shutdown() {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}
@Test
public void testPreserveAcls() throws Exception {
assertRunDistCp(DistCpConstants.SUCCESS, "/dstPreserveAcls");
assertAclEntries("/dstPreserveAcls/dir1", new AclEntry[] {
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "bruce", ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, ALL),
aclEntry(DEFAULT, OTHER, READ_EXECUTE) } );
assertPermission("/dstPreserveAcls/dir1", (short)0755);
assertAclEntries("/dstPreserveAcls/dir1/subdir1", new AclEntry[] { });
assertPermission("/dstPreserveAcls/dir1/subdir1", (short)0755);
assertAclEntries("/dstPreserveAcls/dir2", new AclEntry[] { });
assertPermission("/dstPreserveAcls/dir2", (short)0755);
assertAclEntries("/dstPreserveAcls/dir2/file2", new AclEntry[] {
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, GROUP, "sales", NONE) } );
assertPermission("/dstPreserveAcls/dir2/file2", (short)0644);
assertAclEntries("/dstPreserveAcls/dir2/file3", new AclEntry[] { });
assertPermission("/dstPreserveAcls/dir2/file3", (short)0660);
assertAclEntries("/dstPreserveAcls/dir3sticky", new AclEntry[] { });
assertPermission("/dstPreserveAcls/dir3sticky", (short)01777);
assertAclEntries("/dstPreserveAcls/file1", new AclEntry[] {
aclEntry(ACCESS, USER, "diana", READ),
aclEntry(ACCESS, GROUP, READ) } );
assertPermission("/dstPreserveAcls/file1", (short)0644);
}
@Test
public void testAclsNotEnabled() throws Exception {
try {
restart(false);
assertRunDistCp(DistCpConstants.ACLS_NOT_SUPPORTED, "/dstAclsNotEnabled");
} finally {
restart(true);
}
}
@Test
public void testAclsNotImplemented() throws Exception {
assertRunDistCp(DistCpConstants.ACLS_NOT_SUPPORTED,
"stubfs://dstAclsNotImplemented");
}
/**
* Stub FileSystem implementation used for testing the case of attempting
* distcp with ACLs preserved on a file system that does not support ACLs.
* The base class implementation throws UnsupportedOperationException for the
* ACL methods, so we don't need to override them.
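   *
   * <p>A minimal sketch of how the stub becomes reachable (using only the
   * configuration keys already set by {@code initCluster} below):</p>
   * <pre>{@code
   *   conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);
   *   // any "stubfs://..." destination now resolves to this stub,
   *   // which reports no ACL support
   * }</pre>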
*/
public static class StubFileSystem extends FileSystem {
@Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
return null;
}
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return null;
}
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
return false;
}
@Override
public FileStatus getFileStatus(Path f) throws IOException {
return null;
}
@Override
public URI getUri() {
return URI.create("stubfs:///");
}
@Override
public Path getWorkingDirectory() {
return new Path(Path.SEPARATOR);
}
@Override
public FileStatus[] listStatus(Path f) throws IOException {
return null;
}
@Override
public boolean mkdirs(Path f, FsPermission permission)
throws IOException {
return false;
}
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
return null;
}
@Override
public boolean rename(Path src, Path dst) throws IOException {
return false;
}
@Override
public void setWorkingDirectory(Path dir) {
}
}
/**
* Create a new AclEntry with scope, type and permission (no name).
*
* @param scope AclEntryScope scope of the ACL entry
* @param type AclEntryType ACL entry type
* @param permission FsAction set of permissions in the ACL entry
* @return AclEntry new AclEntry
*/
private static AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
FsAction permission) {
return new AclEntry.Builder()
.setScope(scope)
.setType(type)
.setPermission(permission)
.build();
}
/**
* Create a new AclEntry with scope, type, name and permission.
*
* @param scope AclEntryScope scope of the ACL entry
* @param type AclEntryType ACL entry type
* @param name String optional ACL entry name
* @param permission FsAction set of permissions in the ACL entry
* @return AclEntry new AclEntry
*/
private static AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
String name, FsAction permission) {
return new AclEntry.Builder()
.setScope(scope)
.setType(type)
.setName(name)
.setPermission(permission)
.build();
}
/**
* Asserts the ACL entries returned by getAclStatus for a specific path.
*
* @param path String path to check
* @param entries AclEntry[] expected ACL entries
* @throws Exception if there is any error
*/
private static void assertAclEntries(String path, AclEntry[] entries)
throws Exception {
assertArrayEquals(entries, fs.getAclStatus(new Path(path)).getEntries()
.toArray(new AclEntry[0]));
}
/**
* Asserts the value of the FsPermission bits on the inode of a specific path.
*
* @param path String path to check
* @param perm short expected permission bits
* @throws Exception if there is any error
*/
private static void assertPermission(String path, short perm)
throws Exception {
assertEquals(perm,
fs.getFileStatus(new Path(path)).getPermission().toShort());
}
/**
* Runs distcp from /src to specified destination, preserving ACLs. Asserts
* expected exit code.
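   * Roughly equivalent to the command line (illustrative):
   * {@code hadoop distcp -pa /src <dst>}, where {@code -pa} asks DistCp to
   * preserve ACLs.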
*
   * @param exitCode int expected exit code
* @param dst String distcp destination
* @throws Exception if there is any error
*/
private static void assertRunDistCp(int exitCode, String dst)
throws Exception {
DistCp distCp = new DistCp(conf, null);
assertEquals(exitCode, ToolRunner.run(
conf, distCp, new String[] { "-pa", "/src", dst }));
}
/**
* Initialize the cluster, wait for it to become active, and get FileSystem.
*
* @param format if true, format the NameNode and DataNodes before starting up
* @param aclsEnabled if true, ACL support is enabled
* @throws Exception if any step fails
*/
private static void initCluster(boolean format, boolean aclsEnabled)
throws Exception {
conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, aclsEnabled);
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///");
conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
}
/**
* Restarts the cluster with ACLs enabled or disabled.
*
* @param aclsEnabled if true, ACL support is enabled
* @throws Exception if any step fails
*/
private static void restart(boolean aclsEnabled) throws Exception {
shutdown();
initCluster(false, aclsEnabled);
}
}
| 10,591 | 31.09697 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.util.ToolRunner;
/**
* A JUnit test for copying files recursively.
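 * Drives DistCp with arguments roughly equivalent to the command line
 * (illustrative): {@code hadoop distcp -pu [-update] <src> <dst>}, where
 * {@code -pu} preserves file ownership.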
*/
public class TestDistCpSystem extends TestCase {
private static final String SRCDAT = "srcdat";
private static final String DSTDAT = "dstdat";
private class FileEntry {
String path;
boolean isDir;
public FileEntry(String path, boolean isDir) {
this.path = path;
this.isDir = isDir;
}
String getPath() { return path; }
boolean isDirectory() { return isDir; }
}
private void createFiles(FileSystem fs, String topdir,
FileEntry[] entries) throws IOException {
for (FileEntry entry : entries) {
Path newpath = new Path(topdir + "/" + entry.getPath());
if (entry.isDirectory()) {
fs.mkdirs(newpath);
} else {
OutputStream out = fs.create(newpath);
try {
          out.write((topdir + "/" + entry.getPath()).getBytes());
out.write("\n".getBytes());
} finally {
out.close();
}
}
}
}
private static FileStatus[] getFileStatus(FileSystem fs,
String topdir, FileEntry[] files) throws IOException {
Path root = new Path(topdir);
List<FileStatus> statuses = new ArrayList<FileStatus>();
for (int idx = 0; idx < files.length; ++idx) {
Path newpath = new Path(root, files[idx].getPath());
statuses.add(fs.getFileStatus(newpath));
}
return statuses.toArray(new FileStatus[statuses.size()]);
}
  /** Delete a directory and everything underneath it. */
private static void deldir(FileSystem fs, String topdir) throws IOException {
fs.delete(new Path(topdir), true);
}
private void testPreserveUserHelper(
FileEntry[] srcEntries,
FileEntry[] dstEntries,
boolean createSrcDir,
boolean createTgtDir,
boolean update) throws Exception {
Configuration conf = null;
MiniDFSCluster cluster = null;
try {
final String testRoot = "/testdir";
final String testSrcRel = SRCDAT;
final String testSrc = testRoot + "/" + testSrcRel;
final String testDstRel = DSTDAT;
final String testDst = testRoot + "/" + testDstRel;
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
String nnUri = FileSystem.getDefaultUri(conf).toString();
FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
fs.mkdirs(new Path(testRoot));
if (createSrcDir) {
fs.mkdirs(new Path(testSrc));
}
if (createTgtDir) {
fs.mkdirs(new Path(testDst));
}
createFiles(fs, testRoot, srcEntries);
FileStatus[] srcstats = getFileStatus(fs, testRoot, srcEntries);
for(int i = 0; i < srcEntries.length; i++) {
fs.setOwner(srcstats[i].getPath(), "u" + i, null);
}
String[] args = update? new String[]{"-pu", "-update", nnUri+testSrc,
nnUri+testDst} : new String[]{"-pu", nnUri+testSrc, nnUri+testDst};
ToolRunner.run(conf, new DistCp(), args);
String realTgtPath = testDst;
if (!createTgtDir) {
realTgtPath = testRoot;
}
FileStatus[] dststat = getFileStatus(fs, realTgtPath, dstEntries);
for(int i = 0; i < dststat.length; i++) {
assertEquals("i=" + i, "u" + i, dststat[i].getOwner());
}
deldir(fs, testRoot);
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
  public void testPreserveUserNonEmptyDir() throws Exception {
FileEntry[] srcfiles = {
new FileEntry(SRCDAT, true),
new FileEntry(SRCDAT + "/a", false),
new FileEntry(SRCDAT + "/b", true),
new FileEntry(SRCDAT + "/b/c", false)
};
FileEntry[] dstfiles = {
new FileEntry(DSTDAT, true),
new FileEntry(DSTDAT + "/a", false),
new FileEntry(DSTDAT + "/b", true),
new FileEntry(DSTDAT + "/b/c", false)
};
testPreserveUserHelper(srcfiles, srcfiles, false, true, false);
testPreserveUserHelper(srcfiles, dstfiles, false, false, false);
}
public void testPreserveUserEmptyDir() throws Exception {
FileEntry[] srcfiles = {
new FileEntry(SRCDAT, true)
};
FileEntry[] dstfiles = {
new FileEntry(DSTDAT, true)
};
testPreserveUserHelper(srcfiles, srcfiles, false, true, false);
testPreserveUserHelper(srcfiles, dstfiles, false, false, false);
}
public void testPreserveUserSingleFile() throws Exception {
FileEntry[] srcfiles = {
new FileEntry(SRCDAT, false)
};
FileEntry[] dstfiles = {
new FileEntry(DSTDAT, false)
};
testPreserveUserHelper(srcfiles, srcfiles, false, true, false);
testPreserveUserHelper(srcfiles, dstfiles, false, false, false);
}
public void testPreserveUserNonEmptyDirWithUpdate() throws Exception {
FileEntry[] srcfiles = {
new FileEntry(SRCDAT + "/a", false),
new FileEntry(SRCDAT + "/b", true),
new FileEntry(SRCDAT + "/b/c", false)
};
FileEntry[] dstfiles = {
new FileEntry("a", false),
new FileEntry("b", true),
new FileEntry("b/c", false)
};
testPreserveUserHelper(srcfiles, dstfiles, true, true, true);
}
}
| 6,477 | 30.754902 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithXAttrs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.io.IOException;
import java.net.URI;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.tools.util.DistCpTestUtils;
import org.apache.hadoop.util.Progressable;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import com.google.common.collect.Maps;
/**
* Tests distcp in combination with HDFS XAttrs.
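 * XAttr preservation is requested with the {@code -px} flag, i.e. roughly
 * {@code hadoop distcp -px /src /dstPreserveXAttrs} on the command line
 * (illustrative).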
*/
public class TestDistCpWithXAttrs {
private static MiniDFSCluster cluster;
private static Configuration conf;
private static FileSystem fs;
//XAttrs
private static final String name1 = "user.a1";
private static final byte[] value1 = {0x31, 0x32, 0x33};
private static final String name2 = "trusted.a2";
private static final byte[] value2 = {0x37, 0x38, 0x39};
private static final String name3 = "user.a3";
private static final byte[] value3 = null;
private static final String name4 = "user.a4";
private static final byte[] value4 = null;
private static final Path dir1 = new Path("/src/dir1");
private static final Path subDir1 = new Path(dir1, "subdir1");
private static final Path file1 = new Path("/src/file1");
private static final Path dir2 = new Path("/src/dir2");
private static final Path file2 = new Path(dir2, "file2");
private static final Path file3 = new Path(dir2, "file3");
private static final Path file4 = new Path(dir2, "file4");
private static final Path dstDir1 = new Path("/dstPreserveXAttrs/dir1");
private static final Path dstSubDir1 = new Path(dstDir1, "subdir1");
private static final Path dstFile1 = new Path("/dstPreserveXAttrs/file1");
private static final Path dstDir2 = new Path("/dstPreserveXAttrs/dir2");
private static final Path dstFile2 = new Path(dstDir2, "file2");
private static final Path dstFile3 = new Path(dstDir2, "file3");
private static final Path dstFile4 = new Path(dstDir2, "file4");
private static final String rootedSrcName = "/src";
@BeforeClass
public static void init() throws Exception {
initCluster(true, true);
fs.mkdirs(subDir1);
fs.create(file1).close();
fs.mkdirs(dir2);
fs.create(file2).close();
fs.create(file3).close();
fs.create(file4).close();
// dir1
fs.setXAttr(dir1, name1, value1);
fs.setXAttr(dir1, name2, value2);
// subDir1
fs.setXAttr(subDir1, name1, value1);
fs.setXAttr(subDir1, name3, value3);
// file1
fs.setXAttr(file1, name1, value1);
fs.setXAttr(file1, name2, value2);
fs.setXAttr(file1, name3, value3);
// dir2
fs.setXAttr(dir2, name2, value2);
// file2
fs.setXAttr(file2, name1, value1);
fs.setXAttr(file2, name4, value4);
// file3
fs.setXAttr(file3, name3, value3);
fs.setXAttr(file3, name4, value4);
}
@AfterClass
public static void shutdown() {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}
@Test
public void testPreserveXAttrs() throws Exception {
DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, rootedSrcName,
"/dstPreserveXAttrs", "-px", conf);
// dstDir1
Map<String, byte[]> xAttrs = Maps.newHashMap();
xAttrs.put(name1, value1);
xAttrs.put(name2, value2);
DistCpTestUtils.assertXAttrs(dstDir1, fs, xAttrs);
// dstSubDir1
xAttrs.clear();
xAttrs.put(name1, value1);
xAttrs.put(name3, new byte[0]);
DistCpTestUtils.assertXAttrs(dstSubDir1, fs, xAttrs);
// dstFile1
xAttrs.clear();
xAttrs.put(name1, value1);
xAttrs.put(name2, value2);
xAttrs.put(name3, new byte[0]);
DistCpTestUtils.assertXAttrs(dstFile1, fs, xAttrs);
// dstDir2
xAttrs.clear();
xAttrs.put(name2, value2);
DistCpTestUtils.assertXAttrs(dstDir2, fs, xAttrs);
// dstFile2
xAttrs.clear();
xAttrs.put(name1, value1);
xAttrs.put(name4, new byte[0]);
DistCpTestUtils.assertXAttrs(dstFile2, fs, xAttrs);
// dstFile3
xAttrs.clear();
xAttrs.put(name3, new byte[0]);
xAttrs.put(name4, new byte[0]);
DistCpTestUtils.assertXAttrs(dstFile3, fs, xAttrs);
// dstFile4
xAttrs.clear();
DistCpTestUtils.assertXAttrs(dstFile4, fs, xAttrs);
}
@Test
public void testXAttrsNotEnabled() throws Exception {
try {
restart(false);
DistCpTestUtils.assertRunDistCp(DistCpConstants.XATTRS_NOT_SUPPORTED,
rootedSrcName, "/dstXAttrsNotEnabled", "-px", conf);
} finally {
restart(true);
}
}
@Test
public void testXAttrsNotImplemented() throws Exception {
DistCpTestUtils.assertRunDistCp(DistCpConstants.XATTRS_NOT_SUPPORTED,
rootedSrcName, "stubfs://dstXAttrsNotImplemented", "-px", conf);
}
/**
* Stub FileSystem implementation used for testing the case of attempting
* distcp with XAttrs preserved on a file system that does not support XAttrs.
* The base class implementation throws UnsupportedOperationException for
* the XAttr methods, so we don't need to override them.
*/
public static class StubFileSystem extends FileSystem {
@Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
return null;
}
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return null;
}
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
return false;
}
@Override
public FileStatus getFileStatus(Path f) throws IOException {
return null;
}
@Override
public URI getUri() {
return URI.create("stubfs:///");
}
@Override
public Path getWorkingDirectory() {
return new Path(Path.SEPARATOR);
}
@Override
public FileStatus[] listStatus(Path f) throws IOException {
return null;
}
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
return false;
}
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
return null;
}
@Override
public boolean rename(Path src, Path dst) throws IOException {
return false;
}
@Override
public void setWorkingDirectory(Path dir) {
}
}
/**
* Initialize the cluster, wait for it to become active, and get FileSystem.
*
* @param format if true, format the NameNode and DataNodes before starting up
* @param xAttrsEnabled if true, XAttr support is enabled
* @throws Exception if any step fails
*/
private static void initCluster(boolean format, boolean xAttrsEnabled)
throws Exception {
conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, xAttrsEnabled);
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///");
conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
}
/**
* Restarts the cluster with XAttrs enabled or disabled.
*
* @param xAttrsEnabled if true, XAttr support is enabled
* @throws Exception if any step fails
*/
private static void restart(boolean xAttrsEnabled) throws Exception {
shutdown();
initCluster(false, xAttrsEnabled);
}
}
| 8,803 | 30.219858 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.tools.util.DistCpTestUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import com.google.common.collect.Maps;
/**
* Tests distcp in combination with HDFS raw.* XAttrs.
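 * raw.* XAttrs are only visible under the {@code /.reserved/raw} prefix, so a
 * preserving copy is roughly equivalent to (illustrative):
 * {@code hadoop distcp -px /.reserved/raw/src /.reserved/raw/dest}.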
*/
public class TestDistCpWithRawXAttrs {
private static MiniDFSCluster cluster;
private static Configuration conf;
private static FileSystem fs;
private static final String rawName1 = "raw.a1";
private static final byte[] rawValue1 = {0x37, 0x38, 0x39};
private static final String userName1 = "user.a1";
private static final byte[] userValue1 = {0x38, 0x38, 0x38};
private static final Path dir1 = new Path("/src/dir1");
private static final Path subDir1 = new Path(dir1, "subdir1");
private static final Path file1 = new Path("/src/file1");
private static final String rawRootName = "/.reserved/raw";
private static final String rootedDestName = "/dest";
private static final String rootedSrcName = "/src";
private static final String rawDestName = "/.reserved/raw/dest";
private static final String rawSrcName = "/.reserved/raw/src";
@BeforeClass
public static void init() throws Exception {
conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
}
@AfterClass
public static void shutdown() {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}
/* Test that XAttrs and raw.* XAttrs are preserved when appropriate. */
@Test
public void testPreserveRawXAttrs1() throws Exception {
final String relSrc = "/./.reserved/../.reserved/raw/../raw/src/../src";
final String relDst = "/./.reserved/../.reserved/raw/../raw/dest/../dest";
doTestPreserveRawXAttrs(relSrc, relDst, "-px", true, true,
DistCpConstants.SUCCESS);
doTestPreserveRawXAttrs(rootedSrcName, rootedDestName, "-px",
false, true, DistCpConstants.SUCCESS);
doTestPreserveRawXAttrs(rootedSrcName, rawDestName, "-px",
false, true, DistCpConstants.INVALID_ARGUMENT);
doTestPreserveRawXAttrs(rawSrcName, rootedDestName, "-px",
false, true, DistCpConstants.INVALID_ARGUMENT);
doTestPreserveRawXAttrs(rawSrcName, rawDestName, "-px",
true, true, DistCpConstants.SUCCESS);
final Path savedWd = fs.getWorkingDirectory();
try {
fs.setWorkingDirectory(new Path("/.reserved/raw"));
doTestPreserveRawXAttrs("../.." + rawSrcName, "../.." + rawDestName,
"-px", true, true, DistCpConstants.SUCCESS);
} finally {
fs.setWorkingDirectory(savedWd);
}
}
/* Test that XAttrs are not preserved and raw.* are when appropriate. */
@Test
public void testPreserveRawXAttrs2() throws Exception {
doTestPreserveRawXAttrs(rootedSrcName, rootedDestName, "-p",
false, false, DistCpConstants.SUCCESS);
doTestPreserveRawXAttrs(rootedSrcName, rawDestName, "-p",
false, false, DistCpConstants.INVALID_ARGUMENT);
doTestPreserveRawXAttrs(rawSrcName, rootedDestName, "-p",
false, false, DistCpConstants.INVALID_ARGUMENT);
doTestPreserveRawXAttrs(rawSrcName, rawDestName, "-p",
true, false, DistCpConstants.SUCCESS);
}
/* Test that XAttrs are not preserved and raw.* are when appropriate. */
@Test
public void testPreserveRawXAttrs3() throws Exception {
doTestPreserveRawXAttrs(rootedSrcName, rootedDestName, null,
false, false, DistCpConstants.SUCCESS);
doTestPreserveRawXAttrs(rootedSrcName, rawDestName, null,
false, false, DistCpConstants.INVALID_ARGUMENT);
doTestPreserveRawXAttrs(rawSrcName, rootedDestName, null,
false, false, DistCpConstants.INVALID_ARGUMENT);
doTestPreserveRawXAttrs(rawSrcName, rawDestName, null,
true, false, DistCpConstants.SUCCESS);
}
private static Path[] pathnames = { new Path("dir1"),
new Path("dir1/subdir1"),
new Path("file1") };
private static void makeFilesAndDirs(FileSystem fs) throws Exception {
fs.delete(new Path("/src"), true);
fs.delete(new Path("/dest"), true);
fs.mkdirs(subDir1);
fs.create(file1).close();
}
private void initXAttrs() throws Exception {
makeFilesAndDirs(fs);
for (Path p : pathnames) {
fs.setXAttr(new Path(rawRootName + "/src", p), rawName1, rawValue1);
fs.setXAttr(new Path(rawRootName + "/src", p), userName1, userValue1);
}
}
private void doTestPreserveRawXAttrs(String src, String dest,
String preserveOpts, boolean expectRaw, boolean expectUser,
int expectedExitCode) throws Exception {
initXAttrs();
DistCpTestUtils.assertRunDistCp(expectedExitCode, src, dest,
preserveOpts, conf);
if (expectedExitCode == DistCpConstants.SUCCESS) {
Map<String, byte[]> xAttrs = Maps.newHashMap();
for (Path p : pathnames) {
xAttrs.clear();
if (expectRaw) {
xAttrs.put(rawName1, rawValue1);
}
if (expectUser) {
xAttrs.put(userName1, userValue1);
}
DistCpTestUtils.assertXAttrs(new Path(dest, p), fs, xAttrs);
}
}
}
}
| 6,433 | 36.625731 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestGlobbedCopyListing.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.tools.util.DistCpUtils;
import org.apache.hadoop.security.Credentials;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.DataOutputStream;
import java.net.URI;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
public class TestGlobbedCopyListing {
private static MiniDFSCluster cluster;
private static final Credentials CREDENTIALS = new Credentials();
public static Map<String, String> expectedValues = new HashMap<String, String>();
@BeforeClass
public static void setup() throws Exception {
cluster = new MiniDFSCluster.Builder(new Configuration()).build();
createSourceData();
}
private static void createSourceData() throws Exception {
mkdirs("/tmp/source/1");
mkdirs("/tmp/source/2");
mkdirs("/tmp/source/2/3");
mkdirs("/tmp/source/2/3/4");
mkdirs("/tmp/source/5");
touchFile("/tmp/source/5/6");
mkdirs("/tmp/source/7");
mkdirs("/tmp/source/7/8");
touchFile("/tmp/source/7/8/9");
}
private static void mkdirs(String path) throws Exception {
FileSystem fileSystem = null;
try {
fileSystem = cluster.getFileSystem();
fileSystem.mkdirs(new Path(path));
recordInExpectedValues(path);
}
finally {
IOUtils.cleanup(null, fileSystem);
}
}
private static void touchFile(String path) throws Exception {
FileSystem fileSystem = null;
DataOutputStream outputStream = null;
try {
fileSystem = cluster.getFileSystem();
outputStream = fileSystem.create(new Path(path), true, 0);
recordInExpectedValues(path);
}
finally {
IOUtils.cleanup(null, fileSystem, outputStream);
}
}
private static void recordInExpectedValues(String path) throws Exception {
FileSystem fileSystem = cluster.getFileSystem();
Path sourcePath = new Path(fileSystem.getUri().toString() + path);
expectedValues.put(sourcePath.toString(), DistCpUtils.getRelativePath(
new Path("/tmp/source"), sourcePath));
}
@AfterClass
public static void tearDown() {
cluster.shutdown();
}
@Test
public void testRun() throws Exception {
final URI uri = cluster.getFileSystem().getUri();
final String pathString = uri.toString();
Path fileSystemPath = new Path(pathString);
Path source = new Path(fileSystemPath.toString() + "/tmp/source");
Path target = new Path(fileSystemPath.toString() + "/tmp/target");
Path listingPath = new Path(fileSystemPath.toString() + "/tmp/META/fileList.seq");
DistCpOptions options = new DistCpOptions(Arrays.asList(source), target);
options.setTargetPathExists(false);
new GlobbedCopyListing(new Configuration(), CREDENTIALS).buildListing(listingPath, options);
verifyContents(listingPath);
}
private void verifyContents(Path listingPath) throws Exception {
SequenceFile.Reader reader = new SequenceFile.Reader(cluster.getFileSystem(),
listingPath, new Configuration());
Text key = new Text();
CopyListingFileStatus value = new CopyListingFileStatus();
Map<String, String> actualValues = new HashMap<String, String>();
while (reader.next(key, value)) {
if (value.isDirectory() && key.toString().equals("")) {
// ignore root with empty relPath, which is an entry to be
// used for preserving root attributes etc.
continue;
}
actualValues.put(value.getPath().toString(), key.toString());
}
Assert.assertEquals(expectedValues.size(), actualValues.size());
for (Map.Entry<String, String> entry : actualValues.entrySet()) {
Assert.assertEquals(entry.getValue(), expectedValues.get(entry.getKey()));
}
}
}
| 4,907 | 34.057143 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import static org.junit.Assert.fail;
import org.junit.Assert;
import org.junit.Test;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.tools.DistCpOptions.*;
import org.apache.hadoop.conf.Configuration;
import java.util.Iterator;
import java.util.NoSuchElementException;
public class TestOptionsParser {
@Test
public void testParseIgnoreFailure() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertFalse(options.shouldIgnoreFailures());
options = OptionsParser.parse(new String[] {
"-i",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldIgnoreFailures());
}
@Test
public void testParseOverwrite() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertFalse(options.shouldOverwrite());
options = OptionsParser.parse(new String[] {
"-overwrite",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldOverwrite());
try {
OptionsParser.parse(new String[] {
"-update",
"-overwrite",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.fail("Update and overwrite aren't allowed together");
} catch (IllegalArgumentException ignore) {
}
}
@Test
public void testLogPath() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertNull(options.getLogPath());
options = OptionsParser.parse(new String[] {
"-log",
"hdfs://localhost:8020/logs",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getLogPath(), new Path("hdfs://localhost:8020/logs"));
}
@Test
  public void testParseBlocking() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldBlock());
options = OptionsParser.parse(new String[] {
"-async",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertFalse(options.shouldBlock());
}
@Test
  public void testParseBandwidth() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getMapBandwidth(), DistCpConstants.DEFAULT_BANDWIDTH_MB);
options = OptionsParser.parse(new String[] {
"-bandwidth",
"11",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getMapBandwidth(), 11);
}
@Test(expected=IllegalArgumentException.class)
public void testParseNonPositiveBandwidth() {
OptionsParser.parse(new String[] {
"-bandwidth",
"-11",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
}
@Test(expected=IllegalArgumentException.class)
public void testParseZeroBandwidth() {
OptionsParser.parse(new String[] {
"-bandwidth",
"0",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
}
@Test
public void testParseSkipCRC() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertFalse(options.shouldSkipCRC());
options = OptionsParser.parse(new String[] {
"-update",
"-skipcrccheck",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldSyncFolder());
Assert.assertTrue(options.shouldSkipCRC());
}
@Test
public void testParseAtomicCommit() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertFalse(options.shouldAtomicCommit());
options = OptionsParser.parse(new String[] {
"-atomic",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldAtomicCommit());
try {
OptionsParser.parse(new String[] {
"-atomic",
"-update",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.fail("Atomic and sync folders were allowed");
} catch (IllegalArgumentException ignore) { }
}
@Test
public void testParseWorkPath() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertNull(options.getAtomicWorkPath());
options = OptionsParser.parse(new String[] {
"-atomic",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertNull(options.getAtomicWorkPath());
options = OptionsParser.parse(new String[] {
"-atomic",
"-tmp",
"hdfs://localhost:8020/work",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getAtomicWorkPath(), new Path("hdfs://localhost:8020/work"));
try {
OptionsParser.parse(new String[] {
"-tmp",
"hdfs://localhost:8020/work",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.fail("work path was allowed without -atomic switch");
} catch (IllegalArgumentException ignore) {}
}
@Test
public void testParseSyncFolders() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertFalse(options.shouldSyncFolder());
options = OptionsParser.parse(new String[] {
"-update",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldSyncFolder());
}
@Test
public void testParseDeleteMissing() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertFalse(options.shouldDeleteMissing());
options = OptionsParser.parse(new String[] {
"-update",
"-delete",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldSyncFolder());
Assert.assertTrue(options.shouldDeleteMissing());
options = OptionsParser.parse(new String[] {
"-overwrite",
"-delete",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldOverwrite());
Assert.assertTrue(options.shouldDeleteMissing());
try {
OptionsParser.parse(new String[] {
"-atomic",
"-delete",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.fail("Atomic and delete folders were allowed");
} catch (IllegalArgumentException ignore) { }
}
@Test
public void testParseSSLConf() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertNull(options.getSslConfigurationFile());
options = OptionsParser.parse(new String[] {
"-mapredSslConf",
"/tmp/ssl-client.xml",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getSslConfigurationFile(), "/tmp/ssl-client.xml");
}
@Test
public void testParseMaps() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getMaxMaps(), DistCpConstants.DEFAULT_MAPS);
options = OptionsParser.parse(new String[] {
"-m",
"1",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getMaxMaps(), 1);
options = OptionsParser.parse(new String[] {
"-m",
"0",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getMaxMaps(), 1);
try {
OptionsParser.parse(new String[] {
"-m",
"hello",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.fail("Non numberic map parsed");
} catch (IllegalArgumentException ignore) { }
try {
OptionsParser.parse(new String[] {
"-mapredXslConf",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.fail("Non numberic map parsed");
} catch (IllegalArgumentException ignore) { }
}
@Test
public void testParseNumListstatusThreads() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
// If command line argument isn't set, we expect .getNumListstatusThreads
// option to be zero (so that we know when to override conf properties).
Assert.assertEquals(0, options.getNumListstatusThreads());
options = OptionsParser.parse(new String[] {
"--numListstatusThreads",
"12",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(12, options.getNumListstatusThreads());
options = OptionsParser.parse(new String[] {
"--numListstatusThreads",
"0",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(0, options.getNumListstatusThreads());
try {
OptionsParser.parse(new String[] {
"--numListstatusThreads",
"hello",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.fail("Non numberic numListstatusThreads parsed");
} catch (IllegalArgumentException ignore) { }
// Ignore large number of threads.
options = OptionsParser.parse(new String[] {
"--numListstatusThreads",
"100",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(DistCpOptions.maxNumListstatusThreads,
options.getNumListstatusThreads());
}
@Test
public void testSourceListing() {
DistCpOptions options = OptionsParser.parse(new String[] {
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getSourceFileListing(),
new Path("hdfs://localhost:8020/source/first"));
}
@Test
public void testSourceListingAndSourcePath() {
try {
OptionsParser.parse(new String[] {
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.fail("Both source listing & source paths allowed");
} catch (IllegalArgumentException ignore) {}
}
@Test
public void testMissingSourceInfo() {
try {
OptionsParser.parse(new String[] {
"hdfs://localhost:8020/target/"});
Assert.fail("Neither source listing not source paths present");
} catch (IllegalArgumentException ignore) {}
}
@Test
public void testMissingTarget() {
try {
OptionsParser.parse(new String[] {
"-f", "hdfs://localhost:8020/source"});
Assert.fail("Missing target allowed");
} catch (IllegalArgumentException ignore) {}
}
@Test
public void testInvalidArgs() {
try {
OptionsParser.parse(new String[] {
"-m", "-f", "hdfs://localhost:8020/source"});
Assert.fail("Missing map value");
} catch (IllegalArgumentException ignore) {}
}
@Test
public void testToString() {
DistCpOptions option = new DistCpOptions(new Path("abc"), new Path("xyz"));
String val = "DistCpOptions{atomicCommit=false, syncFolder=false, deleteMissing=false, " +
"ignoreFailures=false, maxMaps=20, sslConfigurationFile='null', copyStrategy='uniformsize', " +
"sourceFileListing=abc, sourcePaths=null, targetPath=xyz, targetPathExists=true, " +
"preserveRawXattrs=false, filtersFile='null'}";
Assert.assertEquals(val, option.toString());
Assert.assertNotSame(DistCpOptionSwitch.ATOMIC_COMMIT.toString(),
DistCpOptionSwitch.ATOMIC_COMMIT.name());
}
@Test
public void testCopyStrategy() {
DistCpOptions options = OptionsParser.parse(new String[] {
"-strategy",
"dynamic",
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getCopyStrategy(), "dynamic");
options = OptionsParser.parse(new String[] {
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getCopyStrategy(), DistCpConstants.UNIFORMSIZE);
}
@Test
public void testTargetPath() {
DistCpOptions options = OptionsParser.parse(new String[] {
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getTargetPath(), new Path("hdfs://localhost:8020/target/"));
}
@Test
public void testPreserve() {
DistCpOptions options = OptionsParser.parse(new String[] {
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
options = OptionsParser.parse(new String[] {
"-p",
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));
options = OptionsParser.parse(new String[] {
"-p",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));
options = OptionsParser.parse(new String[] {
"-pbr",
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));
options = OptionsParser.parse(new String[] {
"-pbrgup",
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));
options = OptionsParser.parse(new String[] {
"-pbrgupcax",
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
Assert.assertTrue(options.shouldPreserve(FileAttribute.ACL));
Assert.assertTrue(options.shouldPreserve(FileAttribute.XATTR));
options = OptionsParser.parse(new String[] {
"-pc",
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));
options = OptionsParser.parse(new String[] {
"-p",
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
int i = 0;
Iterator<FileAttribute> attribIterator = options.preserveAttributes();
while (attribIterator.hasNext()) {
attribIterator.next();
i++;
}
Assert.assertEquals(i, DistCpOptionSwitch.PRESERVE_STATUS_DEFAULT.length() - 2);
try {
OptionsParser.parse(new String[] {
"-pabcd",
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target"});
Assert.fail("Invalid preserve attribute");
}
catch (IllegalArgumentException ignore) {}
catch (NoSuchElementException ignore) {}
options = OptionsParser.parse(new String[] {
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
options.preserve(FileAttribute.PERMISSION);
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
options.preserve(FileAttribute.PERMISSION);
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
}
@Test
public void testOptionsSwitchAddToConf() {
Configuration conf = new Configuration();
Assert.assertNull(conf.get(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel()));
DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.ATOMIC_COMMIT);
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(), false));
}
@Test
public void testOptionsAppendToConf() {
Configuration conf = new Configuration();
Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false));
Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(), false));
DistCpOptions options = OptionsParser.parse(new String[] {
"-atomic",
"-i",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
options.appendToConf(conf);
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false));
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(), false));
Assert.assertEquals(conf.getInt(DistCpOptionSwitch.BANDWIDTH.getConfigLabel(), -1),
DistCpConstants.DEFAULT_BANDWIDTH_MB);
conf = new Configuration();
Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(), false));
    Assert.assertNull(conf.get(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel()));
options = OptionsParser.parse(new String[] {
"-update",
"-delete",
"-pu",
"-bandwidth",
"11",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
options.appendToConf(conf);
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(), false));
Assert.assertEquals(conf.get(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel()), "U");
Assert.assertEquals(conf.getInt(DistCpOptionSwitch.BANDWIDTH.getConfigLabel(), -1), 11);
}
@Test
public void testAppendOption() {
Configuration conf = new Configuration();
Assert.assertFalse(conf.getBoolean(
DistCpOptionSwitch.APPEND.getConfigLabel(), false));
Assert.assertFalse(conf.getBoolean(
DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
DistCpOptions options = OptionsParser.parse(new String[] { "-update",
"-append", "hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
options.appendToConf(conf);
Assert.assertTrue(conf.getBoolean(
DistCpOptionSwitch.APPEND.getConfigLabel(), false));
Assert.assertTrue(conf.getBoolean(
DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
// make sure -append is only valid when -update is specified
try {
OptionsParser.parse(new String[] { "-append",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
fail("Append should fail if update option is not specified");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
"Append is valid only with update options", e);
}
// make sure -append is invalid when skipCrc is specified
try {
OptionsParser.parse(new String[] {
"-append", "-update", "-skipcrccheck",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
fail("Append should fail if skipCrc option is specified");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
"Append is disallowed when skipping CRC", e);
}
}
@Test
public void testDiffOption() {
Configuration conf = new Configuration();
Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.DIFF.getConfigLabel(),
false));
DistCpOptions options = OptionsParser.parse(new String[] { "-update",
"-delete", "-diff", "s1", "s2",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
options.appendToConf(conf);
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.DIFF.getConfigLabel(), false));
Assert.assertTrue(options.shouldUseDiff());
Assert.assertEquals("s1", options.getFromSnapshot());
Assert.assertEquals("s2", options.getToSnapshot());
options = OptionsParser.parse(new String[] {
"-delete", "-diff", "s1", ".", "-update",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
options.appendToConf(conf);
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.DIFF.getConfigLabel(),
false));
Assert.assertTrue(options.shouldUseDiff());
Assert.assertEquals("s1", options.getFromSnapshot());
Assert.assertEquals(".", options.getToSnapshot());
// -diff requires two option values
try {
OptionsParser.parse(new String[] {"-diff", "s1", "-delete", "-update",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
fail("-diff should fail with only one snapshot name");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
"Must provide both the starting and ending snapshot names", e);
}
// make sure -diff is only valid when -update and -delete is specified
try {
OptionsParser.parse(new String[] { "-diff", "s1", "s2",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
fail("-diff should fail if -update or -delete option is not specified");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
"Diff is valid only with update and delete options", e);
}
try {
OptionsParser.parse(new String[] { "-diff", "s1", "s2", "-update",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
fail("-diff should fail if -update or -delete option is not specified");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
"Diff is valid only with update and delete options", e);
}
try {
OptionsParser.parse(new String[] { "-diff", "s1", "s2",
"-delete", "-overwrite",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
fail("-diff should fail if -update or -delete option is not specified");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
"Diff is valid only with update and delete options", e);
}
}
@Test
public void testExclusionsOption() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertNull(options.getFiltersFile());
options = OptionsParser.parse(new String[] {
"-filters",
"/tmp/filters.txt",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getFiltersFile(), "/tmp/filters.txt");
}
}
| 28,359 | 37.480326 | 103 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestIntegration.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.JobSubmissionFiles;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.tools.util.TestDistCpUtils;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import org.junit.Test;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
@RunWith(value = Parameterized.class)
public class TestIntegration {
private static final Log LOG = LogFactory.getLog(TestIntegration.class);
private static FileSystem fs;
private static Path listFile;
private static Path target;
private static String root;
private int numListstatusThreads;
public TestIntegration(int numListstatusThreads) {
this.numListstatusThreads = numListstatusThreads;
}
@Parameters
public static Collection<Object[]> data() {
Object[][] data = new Object[][] { { 1 }, { 2 }, { 10 } };
return Arrays.asList(data);
}
private static Configuration getConf() {
Configuration conf = new Configuration();
conf.set("fs.default.name", "file:///");
conf.set("mapred.job.tracker", "local");
return conf;
}
@BeforeClass
public static void setup() {
try {
fs = FileSystem.get(getConf());
listFile = new Path("target/tmp/listing").makeQualified(fs.getUri(),
fs.getWorkingDirectory());
target = new Path("target/tmp/target").makeQualified(fs.getUri(),
fs.getWorkingDirectory());
root = new Path("target/tmp").makeQualified(fs.getUri(),
fs.getWorkingDirectory()).toString();
TestDistCpUtils.delete(fs, root);
} catch (IOException e) {
LOG.error("Exception encountered ", e);
}
}
@Test(timeout=100000)
public void testSingleFileMissingTarget() {
caseSingleFileMissingTarget(false);
caseSingleFileMissingTarget(true);
}
private void caseSingleFileMissingTarget(boolean sync) {
try {
addEntries(listFile, "singlefile1/file1");
createFiles("singlefile1/file1");
runTest(listFile, target, false, sync);
checkResult(target, 1);
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test(timeout=100000)
public void testSingleFileTargetFile() {
caseSingleFileTargetFile(false);
caseSingleFileTargetFile(true);
}
private void caseSingleFileTargetFile(boolean sync) {
try {
addEntries(listFile, "singlefile1/file1");
createFiles("singlefile1/file1", "target");
runTest(listFile, target, false, sync);
checkResult(target, 1);
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test(timeout=100000)
public void testSingleFileTargetDir() {
caseSingleFileTargetDir(false);
caseSingleFileTargetDir(true);
}
private void caseSingleFileTargetDir(boolean sync) {
try {
addEntries(listFile, "singlefile2/file2");
createFiles("singlefile2/file2");
mkdirs(target.toString());
runTest(listFile, target, true, sync);
checkResult(target, 1, "file2");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test(timeout=100000)
public void testSingleDirTargetMissing() {
caseSingleDirTargetMissing(false);
caseSingleDirTargetMissing(true);
}
private void caseSingleDirTargetMissing(boolean sync) {
try {
addEntries(listFile, "singledir");
mkdirs(root + "/singledir/dir1");
runTest(listFile, target, false, sync);
checkResult(target, 1, "dir1");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test(timeout=100000)
public void testSingleDirTargetPresent() {
try {
addEntries(listFile, "singledir");
mkdirs(root + "/singledir/dir1");
mkdirs(target.toString());
runTest(listFile, target, true, false);
checkResult(target, 1, "singledir/dir1");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test(timeout=100000)
public void testUpdateSingleDirTargetPresent() {
try {
addEntries(listFile, "Usingledir");
mkdirs(root + "/Usingledir/Udir1");
mkdirs(target.toString());
runTest(listFile, target, true, true);
checkResult(target, 1, "Udir1");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test(timeout=100000)
public void testMultiFileTargetPresent() {
caseMultiFileTargetPresent(false);
caseMultiFileTargetPresent(true);
}
private void caseMultiFileTargetPresent(boolean sync) {
try {
addEntries(listFile, "multifile/file3", "multifile/file4", "multifile/file5");
createFiles("multifile/file3", "multifile/file4", "multifile/file5");
mkdirs(target.toString());
runTest(listFile, target, true, sync);
checkResult(target, 3, "file3", "file4", "file5");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test(timeout=100000)
public void testMultiFileTargetMissing() {
caseMultiFileTargetMissing(false);
caseMultiFileTargetMissing(true);
}
private void caseMultiFileTargetMissing(boolean sync) {
try {
addEntries(listFile, "multifile/file3", "multifile/file4", "multifile/file5");
createFiles("multifile/file3", "multifile/file4", "multifile/file5");
runTest(listFile, target, false, sync);
checkResult(target, 3, "file3", "file4", "file5");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test(timeout=100000)
public void testMultiDirTargetPresent() {
try {
addEntries(listFile, "multifile", "singledir");
createFiles("multifile/file3", "multifile/file4", "multifile/file5");
mkdirs(target.toString(), root + "/singledir/dir1");
runTest(listFile, target, true, false);
checkResult(target, 2, "multifile/file3", "multifile/file4", "multifile/file5", "singledir/dir1");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test(timeout=100000)
public void testUpdateMultiDirTargetPresent() {
try {
addEntries(listFile, "Umultifile", "Usingledir");
createFiles("Umultifile/Ufile3", "Umultifile/Ufile4", "Umultifile/Ufile5");
mkdirs(target.toString(), root + "/Usingledir/Udir1");
runTest(listFile, target, true, true);
checkResult(target, 4, "Ufile3", "Ufile4", "Ufile5", "Udir1");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test(timeout=100000)
public void testMultiDirTargetMissing() {
try {
addEntries(listFile, "multifile", "singledir");
createFiles("multifile/file3", "multifile/file4", "multifile/file5");
mkdirs(root + "/singledir/dir1");
runTest(listFile, target, false, false);
checkResult(target, 2, "multifile/file3", "multifile/file4",
"multifile/file5", "singledir/dir1");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test(timeout=100000)
public void testUpdateMultiDirTargetMissing() {
try {
addEntries(listFile, "multifile", "singledir");
createFiles("multifile/file3", "multifile/file4", "multifile/file5");
mkdirs(root + "/singledir/dir1");
runTest(listFile, target, false, true);
checkResult(target, 4, "file3", "file4", "file5", "dir1");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test(timeout=100000)
public void testDeleteMissingInDestination() {
try {
addEntries(listFile, "srcdir");
createFiles("srcdir/file1", "dstdir/file1", "dstdir/file2");
Path target = new Path(root + "/dstdir");
runTest(listFile, target, false, true, true, false);
checkResult(target, 1, "file1");
} catch (IOException e) {
LOG.error("Exception encountered while running distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
TestDistCpUtils.delete(fs, "target/tmp1");
}
}
@Test(timeout=100000)
public void testOverwrite() {
byte[] contents1 = "contents1".getBytes();
byte[] contents2 = "contents2".getBytes();
Assert.assertEquals(contents1.length, contents2.length);
try {
addEntries(listFile, "srcdir");
createWithContents("srcdir/file1", contents1);
createWithContents("dstdir/file1", contents2);
Path target = new Path(root + "/dstdir");
runTest(listFile, target, false, false, false, true);
checkResult(target, 1, "file1");
// make sure dstdir/file1 has been overwritten with the contents
// of srcdir/file1
FSDataInputStream is = fs.open(new Path(root + "/dstdir/file1"));
byte[] dstContents = new byte[contents1.length];
is.readFully(dstContents);
is.close();
Assert.assertArrayEquals(contents1, dstContents);
} catch (IOException e) {
LOG.error("Exception encountered while running distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
TestDistCpUtils.delete(fs, "target/tmp1");
}
}
@Test(timeout=100000)
public void testGlobTargetMissingSingleLevel() {
try {
Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(),
fs.getWorkingDirectory());
addEntries(listFile, "*");
createFiles("multifile/file3", "multifile/file4", "multifile/file5");
createFiles("singledir/dir2/file6");
runTest(listFile, target, false, false);
checkResult(target, 2, "multifile/file3", "multifile/file4", "multifile/file5",
"singledir/dir2/file6");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
TestDistCpUtils.delete(fs, "target/tmp1");
}
}
@Test(timeout=100000)
public void testUpdateGlobTargetMissingSingleLevel() {
try {
Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(),
fs.getWorkingDirectory());
addEntries(listFile, "*");
createFiles("multifile/file3", "multifile/file4", "multifile/file5");
createFiles("singledir/dir2/file6");
runTest(listFile, target, false, true);
checkResult(target, 4, "file3", "file4", "file5", "dir2/file6");
} catch (IOException e) {
LOG.error("Exception encountered while running distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
TestDistCpUtils.delete(fs, "target/tmp1");
}
}
@Test(timeout=100000)
public void testGlobTargetMissingMultiLevel() {
try {
Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(),
fs.getWorkingDirectory());
addEntries(listFile, "*/*");
createFiles("multifile/file3", "multifile/file4", "multifile/file5");
createFiles("singledir1/dir3/file7", "singledir1/dir3/file8",
"singledir1/dir3/file9");
runTest(listFile, target, false, false);
checkResult(target, 4, "file3", "file4", "file5",
"dir3/file7", "dir3/file8", "dir3/file9");
} catch (IOException e) {
LOG.error("Exception encountered while running distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
TestDistCpUtils.delete(fs, "target/tmp1");
}
}
@Test(timeout=100000)
public void testUpdateGlobTargetMissingMultiLevel() {
try {
Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(),
fs.getWorkingDirectory());
addEntries(listFile, "*/*");
createFiles("multifile/file3", "multifile/file4", "multifile/file5");
createFiles("singledir1/dir3/file7", "singledir1/dir3/file8",
"singledir1/dir3/file9");
runTest(listFile, target, false, true);
checkResult(target, 6, "file3", "file4", "file5",
"file7", "file8", "file9");
} catch (IOException e) {
LOG.error("Exception encountered while running distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
TestDistCpUtils.delete(fs, "target/tmp1");
}
}
@Test(timeout=100000)
public void testCleanup() {
try {
Path sourcePath = new Path("noscheme:///file");
List<Path> sources = new ArrayList<Path>();
sources.add(sourcePath);
DistCpOptions options = new DistCpOptions(sources, target);
Configuration conf = getConf();
Path stagingDir = JobSubmissionFiles.getStagingDir(
new Cluster(conf), conf);
stagingDir.getFileSystem(conf).mkdirs(stagingDir);
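      // The copy should fail on the bogus "noscheme" URI, and DistCp must still
      // leave the staging directory empty.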
try {
new DistCp(conf, options).execute();
} catch (Throwable t) {
Assert.assertEquals(stagingDir.getFileSystem(conf).
listStatus(stagingDir).length, 0);
}
} catch (Exception e) {
LOG.error("Exception encountered ", e);
Assert.fail("testCleanup failed " + e.getMessage());
}
}
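  // Writes each entry, prefixed with the test root, into the listing file, one path per line.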
private void addEntries(Path listFile, String... entries) throws IOException {
OutputStream out = fs.create(listFile);
try {
for (String entry : entries){
out.write((root + "/" + entry).getBytes());
out.write("\n".getBytes());
}
} finally {
out.close();
}
}
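  // Creates every listed file under the test root, using the file's absolute path as its content.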
private void createFiles(String... entries) throws IOException {
for (String entry : entries){
OutputStream out = fs.create(new Path(root + "/" + entry));
try {
out.write((root + "/" + entry).getBytes());
out.write("\n".getBytes());
} finally {
out.close();
}
}
}
private void createWithContents(String entry, byte[] contents) throws IOException {
OutputStream out = fs.create(new Path(root + "/" + entry));
try {
out.write(contents);
} finally {
out.close();
}
}
private void mkdirs(String... entries) throws IOException {
for (String entry : entries){
fs.mkdirs(new Path(entry));
}
}
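  // Runs DistCp over the listing; the longer overload also exercises the
  // delete-missing and overwrite options.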
private void runTest(Path listFile, Path target, boolean targetExists,
boolean sync) throws IOException {
runTest(listFile, target, targetExists, sync, false, false);
}
private void runTest(Path listFile, Path target, boolean targetExists,
boolean sync, boolean delete,
boolean overwrite) throws IOException {
DistCpOptions options = new DistCpOptions(listFile, target);
options.setSyncFolder(sync);
options.setDeleteMissing(delete);
options.setOverwrite(overwrite);
options.setTargetPathExists(targetExists);
options.setNumListstatusThreads(numListstatusThreads);
try {
new DistCp(getConf(), options).execute();
} catch (Exception e) {
LOG.error("Exception encountered ", e);
throw new IOException(e);
}
}
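  // Asserts that the target has exactly 'count' direct children and that every
  // expected relative path exists under it.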
private void checkResult(Path target, int count, String... relPaths) throws IOException {
Assert.assertEquals(count, fs.listStatus(target).length);
if (relPaths == null || relPaths.length == 0) {
Assert.assertTrue(target.toString(), fs.exists(target));
return;
}
for (String relPath : relPaths) {
Assert.assertTrue(new Path(target, relPath).toString(), fs.exists(new Path(target, relPath)));
}
}
}
| 18,159 | 29.884354 | 104 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.tools.mapred.CopyMapper;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
public class TestDistCpSync {
private MiniDFSCluster cluster;
private final Configuration conf = new HdfsConfiguration();
private DistributedFileSystem dfs;
private DistCpOptions options;
private final Path source = new Path("/source");
private final Path target = new Path("/target");
private final long BLOCK_SIZE = 1024;
private final short DATA_NUM = 1;
@Before
public void setUp() throws Exception {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATA_NUM).build();
cluster.waitActive();
dfs = cluster.getFileSystem();
dfs.mkdirs(source);
dfs.mkdirs(target);
options = new DistCpOptions(Arrays.asList(source), target);
options.setSyncFolder(true);
options.setDeleteMissing(true);
options.setUseDiff(true, "s1", "s2");
options.appendToConf(conf);
conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, target.toString());
conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, target.toString());
}
@After
public void tearDown() throws Exception {
IOUtils.cleanup(null, dfs);
if (cluster != null) {
cluster.shutdown();
}
}
/**
   * Test that sync returns false in the following scenarios:
* 1. the source/target dir are not snapshottable dir
* 2. the source/target does not have the given snapshots
* 3. changes have been made in target
*/
@Test
public void testFallback() throws Exception {
// the source/target dir are not snapshottable dir
Assert.assertFalse(DistCpSync.sync(options, conf));
// make sure the source path has been updated to the snapshot path
final Path spath = new Path(source,
HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + "s2");
Assert.assertEquals(spath, options.getSourcePaths().get(0));
// reset source path in options
options.setSourcePaths(Arrays.asList(source));
// the source/target does not have the given snapshots
dfs.allowSnapshot(source);
dfs.allowSnapshot(target);
Assert.assertFalse(DistCpSync.sync(options, conf));
Assert.assertEquals(spath, options.getSourcePaths().get(0));
// reset source path in options
options.setSourcePaths(Arrays.asList(source));
dfs.createSnapshot(source, "s1");
dfs.createSnapshot(source, "s2");
dfs.createSnapshot(target, "s1");
Assert.assertTrue(DistCpSync.sync(options, conf));
// reset source paths in options
options.setSourcePaths(Arrays.asList(source));
// changes have been made in target
final Path subTarget = new Path(target, "sub");
dfs.mkdirs(subTarget);
Assert.assertFalse(DistCpSync.sync(options, conf));
// make sure the source path has been updated to the snapshot path
Assert.assertEquals(spath, options.getSourcePaths().get(0));
// reset source paths in options
options.setSourcePaths(Arrays.asList(source));
dfs.delete(subTarget, true);
Assert.assertTrue(DistCpSync.sync(options, conf));
}
/**
* create some files and directories under the given directory.
* the final subtree looks like this:
   *                  dir/
   *           foo/          bar/
   *        d1/    f1     d2/    f2
   *       f3             f4
*/
private void initData(Path dir) throws Exception {
final Path foo = new Path(dir, "foo");
final Path bar = new Path(dir, "bar");
final Path d1 = new Path(foo, "d1");
final Path f1 = new Path(foo, "f1");
final Path d2 = new Path(bar, "d2");
final Path f2 = new Path(bar, "f2");
final Path f3 = new Path(d1, "f3");
final Path f4 = new Path(d2, "f4");
DFSTestUtil.createFile(dfs, f1, BLOCK_SIZE, DATA_NUM, 0);
DFSTestUtil.createFile(dfs, f2, BLOCK_SIZE, DATA_NUM, 0);
DFSTestUtil.createFile(dfs, f3, BLOCK_SIZE, DATA_NUM, 0);
DFSTestUtil.createFile(dfs, f4, BLOCK_SIZE, DATA_NUM, 0);
}
/**
* make some changes under the given directory (created in the above way).
* 1. rename dir/foo/d1 to dir/bar/d1
* 2. delete dir/bar/d1/f3
* 3. rename dir/foo to /dir/bar/d1/foo
* 4. delete dir/bar/d1/foo/f1
* 5. create file dir/bar/d1/foo/f1 whose size is 2*BLOCK_SIZE
* 6. append one BLOCK to file dir/bar/f2
* 7. rename dir/bar to dir/foo
*
* Thus after all these ops the subtree looks like this:
   *                  dir/
   *                  foo/
   *         d1/     f2(A)     d2/
   *        foo/                f4
   *      f1(new)
*/
private void changeData(Path dir) throws Exception {
final Path foo = new Path(dir, "foo");
final Path bar = new Path(dir, "bar");
final Path d1 = new Path(foo, "d1");
final Path f2 = new Path(bar, "f2");
final Path bar_d1 = new Path(bar, "d1");
dfs.rename(d1, bar_d1);
final Path f3 = new Path(bar_d1, "f3");
dfs.delete(f3, true);
final Path newfoo = new Path(bar_d1, "foo");
dfs.rename(foo, newfoo);
final Path f1 = new Path(newfoo, "f1");
dfs.delete(f1, true);
DFSTestUtil.createFile(dfs, f1, 2 * BLOCK_SIZE, DATA_NUM, 0);
DFSTestUtil.appendFile(dfs, f2, (int) BLOCK_SIZE);
dfs.rename(bar, new Path(dir, "foo"));
}
/**
* Test the basic functionality.
*/
@Test
public void testSync() throws Exception {
initData(source);
initData(target);
dfs.allowSnapshot(source);
dfs.allowSnapshot(target);
dfs.createSnapshot(source, "s1");
dfs.createSnapshot(target, "s1");
// make changes under source
changeData(source);
dfs.createSnapshot(source, "s2");
// before sync, make some further changes on source. this should not affect
// the later distcp since we're copying (s2-s1) to target
final Path toDelete = new Path(source, "foo/d1/foo/f1");
dfs.delete(toDelete, true);
final Path newdir = new Path(source, "foo/d1/foo/newdir");
dfs.mkdirs(newdir);
// do the sync
Assert.assertTrue(DistCpSync.sync(options, conf));
// make sure the source path has been updated to the snapshot path
final Path spath = new Path(source,
HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + "s2");
Assert.assertEquals(spath, options.getSourcePaths().get(0));
// build copy listing
final Path listingPath = new Path("/tmp/META/fileList.seq");
CopyListing listing = new GlobbedCopyListing(conf, new Credentials());
listing.buildListing(listingPath, options);
Map<Text, CopyListingFileStatus> copyListing = getListing(listingPath);
CopyMapper copyMapper = new CopyMapper();
StubContext stubContext = new StubContext(conf, null, 0);
Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
stubContext.getContext();
// Enable append
context.getConfiguration().setBoolean(
DistCpOptionSwitch.APPEND.getConfigLabel(), true);
copyMapper.setup(context);
for (Map.Entry<Text, CopyListingFileStatus> entry : copyListing.entrySet()) {
copyMapper.map(entry.getKey(), entry.getValue(), context);
}
// verify that we only copied new appended data of f2 and the new file f1
Assert.assertEquals(BLOCK_SIZE * 3, stubContext.getReporter()
.getCounter(CopyMapper.Counter.BYTESCOPIED).getValue());
// verify the source and target now has the same structure
verifyCopy(dfs.getFileStatus(spath), dfs.getFileStatus(target), false);
}
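  // Loads the generated copy listing from the sequence file into a map keyed by relative path.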
private Map<Text, CopyListingFileStatus> getListing(Path listingPath)
throws Exception {
SequenceFile.Reader reader = new SequenceFile.Reader(conf,
SequenceFile.Reader.file(listingPath));
Text key = new Text();
CopyListingFileStatus value = new CopyListingFileStatus();
Map<Text, CopyListingFileStatus> values = new HashMap<>();
while (reader.next(key, value)) {
values.put(key, value);
key = new Text();
value = new CopyListingFileStatus();
}
return values;
}
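  // Recursively verifies that the source and target trees match: same type,
  // same children and identical file contents.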
private void verifyCopy(FileStatus s, FileStatus t, boolean compareName)
throws Exception {
Assert.assertEquals(s.isDirectory(), t.isDirectory());
if (compareName) {
Assert.assertEquals(s.getPath().getName(), t.getPath().getName());
}
if (!s.isDirectory()) {
// verify the file content is the same
byte[] sbytes = DFSTestUtil.readFileBuffer(dfs, s.getPath());
byte[] tbytes = DFSTestUtil.readFileBuffer(dfs, t.getPath());
Assert.assertArrayEquals(sbytes, tbytes);
} else {
FileStatus[] slist = dfs.listStatus(s.getPath());
FileStatus[] tlist = dfs.listStatus(t.getPath());
Assert.assertEquals(slist.length, tlist.length);
for (int i = 0; i < slist.length; i++) {
verifyCopy(slist[i], tlist[i], true);
}
}
}
/**
   * Similar to testSync, but the "to" snapshot is specified as "."
* @throws Exception
*/
@Test
public void testSyncWithCurrent() throws Exception {
options.setUseDiff(true, "s1", ".");
initData(source);
initData(target);
dfs.allowSnapshot(source);
dfs.allowSnapshot(target);
dfs.createSnapshot(source, "s1");
dfs.createSnapshot(target, "s1");
// make changes under source
changeData(source);
// do the sync
Assert.assertTrue(DistCpSync.sync(options, conf));
// make sure the source path is still unchanged
Assert.assertEquals(source, options.getSourcePaths().get(0));
}
private void initData2(Path dir) throws Exception {
final Path test = new Path(dir, "test");
final Path foo = new Path(dir, "foo");
final Path bar = new Path(dir, "bar");
final Path f1 = new Path(test, "f1");
final Path f2 = new Path(foo, "f2");
final Path f3 = new Path(bar, "f3");
DFSTestUtil.createFile(dfs, f1, BLOCK_SIZE, DATA_NUM, 0L);
DFSTestUtil.createFile(dfs, f2, BLOCK_SIZE, DATA_NUM, 1L);
DFSTestUtil.createFile(dfs, f3, BLOCK_SIZE, DATA_NUM, 2L);
}
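  // Rotates the three directories through a temporary name: the old "test" ends
  // up as "bar", "foo" becomes "test" and "bar" becomes "foo".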
private void changeData2(Path dir) throws Exception {
final Path tmpFoo = new Path(dir, "tmpFoo");
final Path test = new Path(dir, "test");
final Path foo = new Path(dir, "foo");
final Path bar = new Path(dir, "bar");
dfs.rename(test, tmpFoo);
dfs.rename(foo, test);
dfs.rename(bar, foo);
dfs.rename(tmpFoo, bar);
}
@Test
public void testSync2() throws Exception {
initData2(source);
initData2(target);
dfs.allowSnapshot(source);
dfs.allowSnapshot(target);
dfs.createSnapshot(source, "s1");
dfs.createSnapshot(target, "s1");
// make changes under source
changeData2(source);
dfs.createSnapshot(source, "s2");
SnapshotDiffReport report = dfs.getSnapshotDiffReport(source, "s1", "s2");
System.out.println(report);
// do the sync
Assert.assertTrue(DistCpSync.sync(options, conf));
verifyCopy(dfs.getFileStatus(source), dfs.getFileStatus(target), false);
}
private void initData3(Path dir) throws Exception {
final Path test = new Path(dir, "test");
final Path foo = new Path(dir, "foo");
final Path bar = new Path(dir, "bar");
final Path f1 = new Path(test, "file");
final Path f2 = new Path(foo, "file");
final Path f3 = new Path(bar, "file");
DFSTestUtil.createFile(dfs, f1, BLOCK_SIZE, DATA_NUM, 0L);
DFSTestUtil.createFile(dfs, f2, BLOCK_SIZE * 2, DATA_NUM, 1L);
DFSTestUtil.createFile(dfs, f3, BLOCK_SIZE * 3, DATA_NUM, 2L);
}
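  // Renames the file in each directory from "file" to "newfile", producing
  // several renamed sources that shared the same original name.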
private void changeData3(Path dir) throws Exception {
final Path test = new Path(dir, "test");
final Path foo = new Path(dir, "foo");
final Path bar = new Path(dir, "bar");
final Path f1 = new Path(test, "file");
final Path f2 = new Path(foo, "file");
final Path f3 = new Path(bar, "file");
final Path newf1 = new Path(test, "newfile");
final Path newf2 = new Path(foo, "newfile");
final Path newf3 = new Path(bar, "newfile");
dfs.rename(f1, newf1);
dfs.rename(f2, newf2);
dfs.rename(f3, newf3);
}
/**
* Test a case where there are multiple source files with the same name
*/
@Test
public void testSync3() throws Exception {
initData3(source);
initData3(target);
dfs.allowSnapshot(source);
dfs.allowSnapshot(target);
dfs.createSnapshot(source, "s1");
dfs.createSnapshot(target, "s1");
// make changes under source
changeData3(source);
dfs.createSnapshot(source, "s2");
SnapshotDiffReport report = dfs.getSnapshotDiffReport(source, "s1", "s2");
System.out.println(report);
// do the sync
Assert.assertTrue(DistCpSync.sync(options, conf));
verifyCopy(dfs.getFileStatus(source), dfs.getFileStatus(target), false);
}
}
| 14,211 | 34.441397 | 81 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestFileBasedCopyListing.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.tools.util.TestDistCpUtils;
import org.apache.hadoop.security.Credentials;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.io.OutputStream;
import java.util.HashMap;
import java.util.Map;
public class TestFileBasedCopyListing {
private static final Log LOG = LogFactory.getLog(TestFileBasedCopyListing.class);
private static final Credentials CREDENTIALS = new Credentials();
private static final Configuration config = new Configuration();
private static MiniDFSCluster cluster;
private static FileSystem fs;
@BeforeClass
public static void create() throws IOException {
cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).format(true)
.build();
fs = cluster.getFileSystem();
buildExpectedValuesMap();
}
@AfterClass
public static void destroy() {
if (cluster != null) {
cluster.shutdown();
}
}
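  // Maps each relative path expected in the copy listing to the absolute source path it must resolve to.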
private static Map<String, String> map = new HashMap<String, String>();
private static void buildExpectedValuesMap() {
map.put("/file1", "/tmp/singlefile1/file1");
map.put("/file2", "/tmp/singlefile2/file2");
map.put("/file3", "/tmp/multifile/file3");
map.put("/file4", "/tmp/multifile/file4");
map.put("/file5", "/tmp/multifile/file5");
map.put("/multifile/file3", "/tmp/multifile/file3");
map.put("/multifile/file4", "/tmp/multifile/file4");
map.put("/multifile/file5", "/tmp/multifile/file5");
map.put("/Ufile3", "/tmp/Umultifile/Ufile3");
map.put("/Ufile4", "/tmp/Umultifile/Ufile4");
map.put("/Ufile5", "/tmp/Umultifile/Ufile5");
map.put("/dir1", "/tmp/singledir/dir1");
map.put("/singledir/dir1", "/tmp/singledir/dir1");
map.put("/dir2", "/tmp/singledir/dir2");
map.put("/singledir/dir2", "/tmp/singledir/dir2");
map.put("/Udir1", "/tmp/Usingledir/Udir1");
map.put("/Udir2", "/tmp/Usingledir/Udir2");
map.put("/dir2/file6", "/tmp/singledir/dir2/file6");
map.put("/singledir/dir2/file6", "/tmp/singledir/dir2/file6");
map.put("/file7", "/tmp/singledir1/dir3/file7");
map.put("/file8", "/tmp/singledir1/dir3/file8");
map.put("/file9", "/tmp/singledir1/dir3/file9");
map.put("/dir3/file7", "/tmp/singledir1/dir3/file7");
map.put("/dir3/file8", "/tmp/singledir1/dir3/file8");
map.put("/dir3/file9", "/tmp/singledir1/dir3/file9");
map.put("/Ufile7", "/tmp/Usingledir1/Udir3/Ufile7");
map.put("/Ufile8", "/tmp/Usingledir1/Udir3/Ufile8");
map.put("/Ufile9", "/tmp/Usingledir1/Udir3/Ufile9");
}
@Test
public void testSingleFileMissingTarget() {
caseSingleFileMissingTarget(false);
caseSingleFileMissingTarget(true);
}
private void caseSingleFileMissingTarget(boolean sync) {
try {
Path listFile = new Path("/tmp/listing");
Path target = new Path("/tmp/target");
addEntries(listFile, "/tmp/singlefile1/file1");
createFiles("/tmp/singlefile1/file1");
runTest(listFile, target, false, sync);
checkResult(listFile, 0);
} catch (IOException e) {
LOG.error("Exception encountered while testing build listing", e);
Assert.fail("build listing failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp");
}
}
@Test
public void testSingleFileTargetFile() {
caseSingleFileTargetFile(false);
caseSingleFileTargetFile(true);
}
private void caseSingleFileTargetFile(boolean sync) {
try {
Path listFile = new Path("/tmp/listing");
Path target = new Path("/tmp/target");
addEntries(listFile, "/tmp/singlefile1/file1");
createFiles("/tmp/singlefile1/file1", target.toString());
runTest(listFile, target, false, sync);
checkResult(listFile, 0);
} catch (IOException e) {
LOG.error("Exception encountered while testing build listing", e);
Assert.fail("build listing failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp");
}
}
@Test
public void testSingleFileTargetDir() {
caseSingleFileTargetDir(false);
caseSingleFileTargetDir(true);
}
private void caseSingleFileTargetDir(boolean sync) {
try {
Path listFile = new Path("/tmp/listing");
Path target = new Path("/tmp/target");
addEntries(listFile, "/tmp/singlefile2/file2");
createFiles("/tmp/singlefile2/file2");
mkdirs(target.toString());
runTest(listFile, target, true, sync);
checkResult(listFile, 1);
} catch (IOException e) {
LOG.error("Exception encountered while testing build listing", e);
Assert.fail("build listing failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp");
}
}
@Test
public void testSingleDirTargetMissing() {
caseSingleDirTargetMissing(false);
caseSingleDirTargetMissing(true);
}
private void caseSingleDirTargetMissing(boolean sync) {
try {
Path listFile = new Path("/tmp/listing");
Path target = new Path("/tmp/target");
addEntries(listFile, "/tmp/singledir");
mkdirs("/tmp/singledir/dir1");
runTest(listFile, target, false, sync);
checkResult(listFile, 1);
} catch (IOException e) {
LOG.error("Exception encountered while testing build listing", e);
Assert.fail("build listing failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp");
}
}
@Test
public void testSingleDirTargetPresent() {
try {
Path listFile = new Path("/tmp/listing");
Path target = new Path("/tmp/target");
addEntries(listFile, "/tmp/singledir");
mkdirs("/tmp/singledir/dir1");
mkdirs(target.toString());
runTest(listFile, target, true);
checkResult(listFile, 1);
} catch (IOException e) {
LOG.error("Exception encountered while testing build listing", e);
Assert.fail("build listing failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp");
}
}
@Test
public void testUpdateSingleDirTargetPresent() {
try {
Path listFile = new Path("/tmp/listing");
Path target = new Path("/tmp/target");
addEntries(listFile, "/tmp/Usingledir");
mkdirs("/tmp/Usingledir/Udir1");
mkdirs(target.toString());
runTest(listFile, target, true, true);
checkResult(listFile, 1);
} catch (IOException e) {
LOG.error("Exception encountered while testing build listing", e);
Assert.fail("build listing failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp");
}
}
@Test
public void testMultiFileTargetPresent() {
caseMultiFileTargetPresent(false);
caseMultiFileTargetPresent(true);
}
private void caseMultiFileTargetPresent(boolean sync) {
try {
Path listFile = new Path("/tmp/listing");
Path target = new Path("/tmp/target");
addEntries(listFile, "/tmp/multifile/file3", "/tmp/multifile/file4", "/tmp/multifile/file5");
createFiles("/tmp/multifile/file3", "/tmp/multifile/file4", "/tmp/multifile/file5");
mkdirs(target.toString());
runTest(listFile, target, true, sync);
checkResult(listFile, 3);
} catch (IOException e) {
LOG.error("Exception encountered while testing build listing", e);
Assert.fail("build listing failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp");
}
}
@Test
public void testMultiFileTargetMissing() {
caseMultiFileTargetMissing(false);
caseMultiFileTargetMissing(true);
}
private void caseMultiFileTargetMissing(boolean sync) {
try {
Path listFile = new Path("/tmp/listing");
Path target = new Path("/tmp/target");
addEntries(listFile, "/tmp/multifile/file3", "/tmp/multifile/file4", "/tmp/multifile/file5");
createFiles("/tmp/multifile/file3", "/tmp/multifile/file4", "/tmp/multifile/file5");
runTest(listFile, target, false, sync);
checkResult(listFile, 3);
} catch (IOException e) {
LOG.error("Exception encountered while testing build listing", e);
Assert.fail("build listing failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp");
}
}
@Test
public void testMultiDirTargetPresent() {
try {
Path listFile = new Path("/tmp/listing");
Path target = new Path("/tmp/target");
addEntries(listFile, "/tmp/multifile", "/tmp/singledir");
createFiles("/tmp/multifile/file3", "/tmp/multifile/file4", "/tmp/multifile/file5");
mkdirs(target.toString(), "/tmp/singledir/dir1");
runTest(listFile, target, true);
checkResult(listFile, 4);
} catch (IOException e) {
LOG.error("Exception encountered while testing build listing", e);
Assert.fail("build listing failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp");
}
}
@Test
public void testUpdateMultiDirTargetPresent() {
try {
Path listFile = new Path("/tmp/listing");
Path target = new Path("/tmp/target");
addEntries(listFile, "/tmp/Umultifile", "/tmp/Usingledir");
createFiles("/tmp/Umultifile/Ufile3", "/tmp/Umultifile/Ufile4", "/tmp/Umultifile/Ufile5");
mkdirs(target.toString(), "/tmp/Usingledir/Udir1");
runTest(listFile, target, true);
checkResult(listFile, 4);
} catch (IOException e) {
LOG.error("Exception encountered while testing build listing", e);
Assert.fail("build listing failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp");
}
}
@Test
public void testMultiDirTargetMissing() {
caseMultiDirTargetMissing(false);
caseMultiDirTargetMissing(true);
}
private void caseMultiDirTargetMissing(boolean sync) {
try {
Path listFile = new Path("/tmp/listing");
Path target = new Path("/tmp/target");
addEntries(listFile, "/tmp/multifile", "/tmp/singledir");
createFiles("/tmp/multifile/file3", "/tmp/multifile/file4", "/tmp/multifile/file5");
mkdirs("/tmp/singledir/dir1");
runTest(listFile, target, sync);
checkResult(listFile, 4);
} catch (IOException e) {
LOG.error("Exception encountered while testing build listing", e);
Assert.fail("build listing failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp");
}
}
@Test
public void testGlobTargetMissingSingleLevel() {
caseGlobTargetMissingSingleLevel(false);
caseGlobTargetMissingSingleLevel(true);
}
private void caseGlobTargetMissingSingleLevel(boolean sync) {
try {
Path listFile = new Path("/tmp1/listing");
Path target = new Path("/tmp/target");
addEntries(listFile, "/tmp/*");
createFiles("/tmp/multifile/file3", "/tmp/multifile/file4", "/tmp/multifile/file5");
createFiles("/tmp/singledir/dir2/file6");
runTest(listFile, target, sync);
checkResult(listFile, 5);
} catch (IOException e) {
LOG.error("Exception encountered while testing build listing", e);
Assert.fail("build listing failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp");
TestDistCpUtils.delete(fs, "/tmp1");
}
}
@Test
public void testGlobTargetMissingMultiLevel() {
caseGlobTargetMissingMultiLevel(false);
caseGlobTargetMissingMultiLevel(true);
}
private void caseGlobTargetMissingMultiLevel(boolean sync) {
try {
Path listFile = new Path("/tmp1/listing");
Path target = new Path("/tmp/target");
addEntries(listFile, "/tmp/*/*");
createFiles("/tmp/multifile/file3", "/tmp/multifile/file4", "/tmp/multifile/file5");
createFiles("/tmp/singledir1/dir3/file7", "/tmp/singledir1/dir3/file8",
"/tmp/singledir1/dir3/file9");
runTest(listFile, target, sync);
checkResult(listFile, 6);
} catch (IOException e) {
LOG.error("Exception encountered while testing build listing", e);
Assert.fail("build listing failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp");
TestDistCpUtils.delete(fs, "/tmp1");
}
}
@Test
public void testGlobTargetDirMultiLevel() {
try {
Path listFile = new Path("/tmp1/listing");
Path target = new Path("/tmp/target");
addEntries(listFile, "/tmp/*/*");
createFiles("/tmp/multifile/file3", "/tmp/multifile/file4", "/tmp/multifile/file5");
createFiles("/tmp/singledir1/dir3/file7", "/tmp/singledir1/dir3/file8",
"/tmp/singledir1/dir3/file9");
mkdirs(target.toString());
runTest(listFile, target, true);
checkResult(listFile, 6);
} catch (IOException e) {
LOG.error("Exception encountered while testing build listing", e);
Assert.fail("build listing failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp");
TestDistCpUtils.delete(fs, "/tmp1");
}
}
@Test
public void testUpdateGlobTargetDirMultiLevel() {
try {
Path listFile = new Path("/tmp1/listing");
Path target = new Path("/tmp/target");
addEntries(listFile, "/tmp/*/*");
createFiles("/tmp/Umultifile/Ufile3", "/tmp/Umultifile/Ufile4", "/tmp/Umultifile/Ufile5");
createFiles("/tmp/Usingledir1/Udir3/Ufile7", "/tmp/Usingledir1/Udir3/Ufile8",
"/tmp/Usingledir1/Udir3/Ufile9");
mkdirs(target.toString());
runTest(listFile, target, true);
checkResult(listFile, 6);
} catch (IOException e) {
LOG.error("Exception encountered while testing build listing", e);
Assert.fail("build listing failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp");
TestDistCpUtils.delete(fs, "/tmp1");
}
}
private void addEntries(Path listFile, String... entries) throws IOException {
OutputStream out = fs.create(listFile);
try {
for (String entry : entries){
out.write(entry.getBytes());
out.write("\n".getBytes());
}
} finally {
out.close();
}
}
private void createFiles(String... entries) throws IOException {
for (String entry : entries){
OutputStream out = fs.create(new Path(entry));
try {
out.write(entry.getBytes());
out.write("\n".getBytes());
} finally {
out.close();
}
}
}
private void mkdirs(String... entries) throws IOException {
for (String entry : entries){
fs.mkdirs(new Path(entry));
}
}
private void runTest(Path listFile, Path target,
boolean targetExists) throws IOException {
runTest(listFile, target, targetExists, true);
}
private void runTest(Path listFile, Path target, boolean targetExists,
boolean sync) throws IOException {
CopyListing listing = new FileBasedCopyListing(config, CREDENTIALS);
DistCpOptions options = new DistCpOptions(listFile, target);
options.setSyncFolder(sync);
options.setTargetPathExists(targetExists);
listing.buildListing(listFile, options);
}
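  // Re-reads the listing written over listFile and checks every record (other
  // than the root entry) against the expected-values map.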
private void checkResult(Path listFile, int count) throws IOException {
if (count == 0) {
return;
}
int recCount = 0;
SequenceFile.Reader reader = new SequenceFile.Reader(config,
SequenceFile.Reader.file(listFile));
try {
Text relPath = new Text();
CopyListingFileStatus fileStatus = new CopyListingFileStatus();
while (reader.next(relPath, fileStatus)) {
if (fileStatus.isDirectory() && relPath.toString().equals("")) {
// ignore root with empty relPath, which is an entry to be
// used for preserving root attributes etc.
continue;
}
Assert.assertEquals(fileStatus.getPath().toUri().getPath(), map.get(relPath.toString()));
recCount++;
}
} finally {
IOUtils.closeStream(reader);
}
Assert.assertEquals(recCount, count);
}
}
| 16,972 | 29.86 | 99 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestCopyListing.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import static org.mockito.Mockito.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.tools.util.TestDistCpUtils;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import org.junit.Test;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.AfterClass;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
@RunWith(value = Parameterized.class)
public class TestCopyListing extends SimpleCopyListing {
private static final Log LOG = LogFactory.getLog(TestCopyListing.class);
private static final Credentials CREDENTIALS = new Credentials();
private static final Configuration config = new Configuration();
private static MiniDFSCluster cluster;
@BeforeClass
public static void create() throws IOException {
cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).format(true)
.build();
}
@AfterClass
public static void destroy() {
if (cluster != null) {
cluster.shutdown();
}
}
@Parameters
public static Collection<Object[]> data() {
Object[][] data = new Object[][] { { 1 }, { 2 }, { 10 }, { 20} };
return Arrays.asList(data);
}
public TestCopyListing(int numListstatusThreads) {
super(config, CREDENTIALS, numListstatusThreads);
}
protected TestCopyListing(Configuration configuration) {
super(configuration, CREDENTIALS);
}
@Override
protected long getBytesToCopy() {
return 0;
}
@Override
protected long getNumberOfPaths() {
return 0;
}
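  // A file target is only valid for a single source file; multiple sources or a
  // source directory must be rejected.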
@Test(timeout=10000)
public void testMultipleSrcToFile() {
FileSystem fs = null;
try {
fs = FileSystem.get(getConf());
List<Path> srcPaths = new ArrayList<Path>();
srcPaths.add(new Path("/tmp/in/1"));
srcPaths.add(new Path("/tmp/in/2"));
Path target = new Path("/tmp/out/1");
TestDistCpUtils.createFile(fs, "/tmp/in/1");
TestDistCpUtils.createFile(fs, "/tmp/in/2");
fs.mkdirs(target);
DistCpOptions options = new DistCpOptions(srcPaths, target);
validatePaths(options);
TestDistCpUtils.delete(fs, "/tmp");
//No errors
target = new Path("/tmp/out/1");
fs.create(target).close();
options = new DistCpOptions(srcPaths, target);
try {
validatePaths(options);
Assert.fail("Invalid inputs accepted");
} catch (InvalidInputException ignore) { }
TestDistCpUtils.delete(fs, "/tmp");
srcPaths.clear();
srcPaths.add(new Path("/tmp/in/1"));
fs.mkdirs(new Path("/tmp/in/1"));
target = new Path("/tmp/out/1");
fs.create(target).close();
options = new DistCpOptions(srcPaths, target);
try {
validatePaths(options);
Assert.fail("Invalid inputs accepted");
} catch (InvalidInputException ignore) { }
TestDistCpUtils.delete(fs, "/tmp");
} catch (IOException e) {
LOG.error("Exception encountered ", e);
Assert.fail("Test input validation failed");
} finally {
TestDistCpUtils.delete(fs, "/tmp");
}
}
@Test(timeout=10000)
public void testDuplicates() {
FileSystem fs = null;
try {
fs = FileSystem.get(getConf());
List<Path> srcPaths = new ArrayList<Path>();
srcPaths.add(new Path("/tmp/in/*/*"));
TestDistCpUtils.createFile(fs, "/tmp/in/src1/1.txt");
TestDistCpUtils.createFile(fs, "/tmp/in/src2/1.txt");
Path target = new Path("/tmp/out");
Path listingFile = new Path("/tmp/list");
DistCpOptions options = new DistCpOptions(srcPaths, target);
CopyListing listing = CopyListing.getCopyListing(getConf(), CREDENTIALS, options);
try {
listing.buildListing(listingFile, options);
Assert.fail("Duplicates not detected");
} catch (DuplicateFileException ignore) {
}
} catch (IOException e) {
LOG.error("Exception encountered in test", e);
Assert.fail("Test failed " + e.getMessage());
} finally {
TestDistCpUtils.delete(fs, "/tmp");
}
}
@Test(timeout=10000)
public void testBuildListing() {
FileSystem fs = null;
try {
fs = FileSystem.get(getConf());
List<Path> srcPaths = new ArrayList<Path>();
Path p1 = new Path("/tmp/in/1");
Path p2 = new Path("/tmp/in/2");
Path p3 = new Path("/tmp/in2/2");
Path target = new Path("/tmp/out/1");
srcPaths.add(p1.getParent());
srcPaths.add(p3.getParent());
TestDistCpUtils.createFile(fs, "/tmp/in/1");
TestDistCpUtils.createFile(fs, "/tmp/in/2");
TestDistCpUtils.createFile(fs, "/tmp/in2/2");
fs.mkdirs(target);
OutputStream out = fs.create(p1);
out.write("ABC".getBytes());
out.close();
out = fs.create(p2);
out.write("DEF".getBytes());
out.close();
out = fs.create(p3);
out.write("GHIJ".getBytes());
out.close();
Path listingFile = new Path("/tmp/file");
DistCpOptions options = new DistCpOptions(srcPaths, target);
options.setSyncFolder(true);
CopyListing listing = new SimpleCopyListing(getConf(), CREDENTIALS);
try {
listing.buildListing(listingFile, options);
Assert.fail("Duplicates not detected");
} catch (DuplicateFileException ignore) {
}
Assert.assertEquals(listing.getBytesToCopy(), 10);
Assert.assertEquals(listing.getNumberOfPaths(), 3);
TestDistCpUtils.delete(fs, "/tmp");
try {
listing.buildListing(listingFile, options);
Assert.fail("Invalid input not detected");
} catch (InvalidInputException ignore) {
}
TestDistCpUtils.delete(fs, "/tmp");
} catch (IOException e) {
LOG.error("Exception encountered ", e);
Assert.fail("Test build listing failed");
} finally {
TestDistCpUtils.delete(fs, "/tmp");
}
}
@Test(timeout=10000)
public void testBuildListingForSingleFile() {
FileSystem fs = null;
String testRootString = "/singleFileListing";
Path testRoot = new Path(testRootString);
SequenceFile.Reader reader = null;
try {
fs = FileSystem.get(getConf());
if (fs.exists(testRoot))
TestDistCpUtils.delete(fs, testRootString);
Path sourceFile = new Path(testRoot, "/source/foo/bar/source.txt");
Path decoyFile = new Path(testRoot, "/target/moo/source.txt");
Path targetFile = new Path(testRoot, "/target/moo/target.txt");
TestDistCpUtils.createFile(fs, sourceFile.toString());
TestDistCpUtils.createFile(fs, decoyFile.toString());
TestDistCpUtils.createFile(fs, targetFile.toString());
List<Path> srcPaths = new ArrayList<Path>();
srcPaths.add(sourceFile);
DistCpOptions options = new DistCpOptions(srcPaths, targetFile);
CopyListing listing = new SimpleCopyListing(getConf(), CREDENTIALS);
final Path listFile = new Path(testRoot, "/tmp/fileList.seq");
listing.buildListing(listFile, options);
reader = new SequenceFile.Reader(getConf(), SequenceFile.Reader.file(listFile));
CopyListingFileStatus fileStatus = new CopyListingFileStatus();
Text relativePath = new Text();
Assert.assertTrue(reader.next(relativePath, fileStatus));
Assert.assertTrue(relativePath.toString().equals(""));
}
catch (Exception e) {
Assert.fail("Unexpected exception encountered.");
LOG.error("Unexpected exception: ", e);
}
finally {
TestDistCpUtils.delete(fs, testRootString);
IOUtils.closeStream(reader);
}
}
@Test
public void testFailOnCloseError() throws IOException {
File inFile = File.createTempFile("TestCopyListingIn", null);
inFile.deleteOnExit();
File outFile = File.createTempFile("TestCopyListingOut", null);
outFile.deleteOnExit();
List<Path> srcs = new ArrayList<Path>();
srcs.add(new Path(inFile.toURI()));
Exception expectedEx = new IOException("boom");
SequenceFile.Writer writer = mock(SequenceFile.Writer.class);
doThrow(expectedEx).when(writer).close();
SimpleCopyListing listing = new SimpleCopyListing(getConf(), CREDENTIALS);
DistCpOptions options = new DistCpOptions(srcs, new Path(outFile.toURI()));
Exception actualEx = null;
try {
listing.doBuildListing(writer, options);
} catch (Exception e) {
actualEx = e;
}
Assert.assertNotNull("close writer didn't fail", actualEx);
Assert.assertEquals(expectedEx, actualEx);
}
}
| 9,926 | 32.765306 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestTrueCopyFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.hadoop.fs.Path;
import org.junit.Assert;
import org.junit.Test;
public class TestTrueCopyFilter {
@Test
public void testShouldCopy() {
Assert.assertTrue(new TrueCopyFilter().shouldCopy(new Path("fake")));
}
@Test
public void testShouldCopyWithNull() {
Assert.assertTrue(new TrueCopyFilter().shouldCopy(new Path("fake")));
}
}
| 1,209 | 31.702703 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestRegexCopyFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.hadoop.fs.Path;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;
public class TestRegexCopyFilter {
@Test
public void testShouldCopyTrue() {
List<Pattern> filters = new ArrayList<>();
filters.add(Pattern.compile("user"));
RegexCopyFilter regexCopyFilter = new RegexCopyFilter("fakeFile");
regexCopyFilter.setFilters(filters);
Path shouldCopyPath = new Path("/user/bar");
Assert.assertTrue(regexCopyFilter.shouldCopy(shouldCopyPath));
}
@Test
public void testShouldCopyFalse() {
List<Pattern> filters = new ArrayList<>();
filters.add(Pattern.compile(".*test.*"));
RegexCopyFilter regexCopyFilter = new RegexCopyFilter("fakeFile");
regexCopyFilter.setFilters(filters);
Path shouldNotCopyPath = new Path("/user/testing");
Assert.assertFalse(regexCopyFilter.shouldCopy(shouldNotCopyPath));
}
@Test
public void testShouldCopyWithMultipleFilters() {
List<Pattern> filters = new ArrayList<>();
filters.add(Pattern.compile(".*test.*"));
filters.add(Pattern.compile("/user/b.*"));
filters.add(Pattern.compile(".*_SUCCESS"));
List<Path> toCopy = getTestPaths();
int shouldCopyCount = 0;
RegexCopyFilter regexCopyFilter = new RegexCopyFilter("fakeFile");
regexCopyFilter.setFilters(filters);
for (Path path: toCopy) {
if (regexCopyFilter.shouldCopy(path)) {
shouldCopyCount++;
}
}
Assert.assertEquals(2, shouldCopyCount);
}
@Test
public void testShouldExcludeAll() {
List<Pattern> filters = new ArrayList<>();
filters.add(Pattern.compile(".*test.*"));
filters.add(Pattern.compile("/user/b.*"));
filters.add(Pattern.compile(".*")); // exclude everything
List<Path> toCopy = getTestPaths();
int shouldCopyCount = 0;
RegexCopyFilter regexCopyFilter = new RegexCopyFilter("fakeFile");
regexCopyFilter.setFilters(filters);
for (Path path: toCopy) {
if (regexCopyFilter.shouldCopy(path)) {
shouldCopyCount++;
}
}
Assert.assertEquals(0, shouldCopyCount);
}
private List<Path> getTestPaths() {
List<Path> toCopy = new ArrayList<>();
toCopy.add(new Path("/user/bar"));
toCopy.add(new Path("/user/foo/_SUCCESS"));
toCopy.add(new Path("/hive/test_data"));
toCopy.add(new Path("test"));
toCopy.add(new Path("/user/foo/bar"));
toCopy.add(new Path("/mapred/.staging_job"));
return toCopy;
}
}
| 3,399 | 28.824561 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpViewFs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.viewfs.*;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.util.TestDistCpUtils;
import org.apache.hadoop.fs.FsConstants;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
public class TestDistCpViewFs {
private static final Log LOG = LogFactory.getLog(TestDistCpViewFs.class);
private static FileSystem fs;
private static Path listFile;
private static Path target;
private static String root;
private static Configuration getConf() throws URISyntaxException {
Configuration conf = new Configuration();
conf.set("mapred.job.tracker", "local");
conf.set("fs.default.name", "file:///");
return conf;
}
@BeforeClass
public static void setup() throws URISyntaxException{
try {
Path fswd = FileSystem.get(getConf()).getWorkingDirectory();
Configuration vConf = ViewFileSystemTestSetup.createConfig(false);
ConfigUtil.addLink(vConf, "/usr", new URI(fswd.toString()));
fs = FileSystem.get(FsConstants.VIEWFS_URI, vConf);
fs.setWorkingDirectory(new Path("/usr"));
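      // Paths below are relative to /usr, which viewfs maps onto the local file system's working directory.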
listFile = new Path("target/tmp/listing").makeQualified(fs.getUri(),
fs.getWorkingDirectory());
target = new Path("target/tmp/target").makeQualified(fs.getUri(),
fs.getWorkingDirectory());
root = new Path("target/tmp").makeQualified(fs.getUri(),
fs.getWorkingDirectory()).toString();
TestDistCpUtils.delete(fs, root);
} catch (IOException e) {
LOG.error("Exception encountered ", e);
}
}
@Test
public void testSingleFileMissingTarget() {
caseSingleFileMissingTarget(false);
caseSingleFileMissingTarget(true);
}
private void caseSingleFileMissingTarget(boolean sync) {
try {
addEntries(listFile, "singlefile1/file1");
createFiles("singlefile1/file1");
runTest(listFile, target, false, sync);
checkResult(target, 1);
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test
public void testSingleFileTargetFile() {
caseSingleFileTargetFile(false);
caseSingleFileTargetFile(true);
}
private void caseSingleFileTargetFile(boolean sync) {
try {
addEntries(listFile, "singlefile1/file1");
createFiles("singlefile1/file1", target.toString());
runTest(listFile, target, false, sync);
checkResult(target, 1);
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test
public void testSingleFileTargetDir() {
caseSingleFileTargetDir(false);
caseSingleFileTargetDir(true);
}
private void caseSingleFileTargetDir(boolean sync) {
try {
addEntries(listFile, "singlefile2/file2");
createFiles("singlefile2/file2");
mkdirs(target.toString());
runTest(listFile, target, true, sync);
checkResult(target, 1, "file2");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test
public void testSingleDirTargetMissing() {
caseSingleDirTargetMissing(false);
caseSingleDirTargetMissing(true);
}
private void caseSingleDirTargetMissing(boolean sync) {
try {
addEntries(listFile, "singledir");
mkdirs(root + "/singledir/dir1");
runTest(listFile, target, false, sync);
checkResult(target, 1, "dir1");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test
public void testSingleDirTargetPresent() {
try {
addEntries(listFile, "singledir");
mkdirs(root + "/singledir/dir1");
mkdirs(target.toString());
runTest(listFile, target, true, false);
checkResult(target, 1, "singledir/dir1");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test
public void testUpdateSingleDirTargetPresent() {
try {
addEntries(listFile, "Usingledir");
mkdirs(root + "/Usingledir/Udir1");
mkdirs(target.toString());
runTest(listFile, target, true, true);
checkResult(target, 1, "Udir1");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test
public void testMultiFileTargetPresent() {
caseMultiFileTargetPresent(false);
caseMultiFileTargetPresent(true);
}
private void caseMultiFileTargetPresent(boolean sync) {
try {
addEntries(listFile, "multifile/file3", "multifile/file4", "multifile/file5");
createFiles("multifile/file3", "multifile/file4", "multifile/file5");
mkdirs(target.toString());
runTest(listFile, target, true, sync);
checkResult(target, 3, "file3", "file4", "file5");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test
public void testMultiFileTargetMissing() {
caseMultiFileTargetMissing(false);
caseMultiFileTargetMissing(true);
}
private void caseMultiFileTargetMissing(boolean sync) {
try {
addEntries(listFile, "multifile/file3", "multifile/file4", "multifile/file5");
createFiles("multifile/file3", "multifile/file4", "multifile/file5");
runTest(listFile, target, false, sync);
checkResult(target, 3, "file3", "file4", "file5");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test
public void testMultiDirTargetPresent() {
try {
addEntries(listFile, "multifile", "singledir");
createFiles("multifile/file3", "multifile/file4", "multifile/file5");
mkdirs(target.toString(), root + "/singledir/dir1");
runTest(listFile, target, true, false);
checkResult(target, 2, "multifile/file3", "multifile/file4", "multifile/file5", "singledir/dir1");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test
public void testUpdateMultiDirTargetPresent() {
try {
addEntries(listFile, "Umultifile", "Usingledir");
createFiles("Umultifile/Ufile3", "Umultifile/Ufile4", "Umultifile/Ufile5");
mkdirs(target.toString(), root + "/Usingledir/Udir1");
runTest(listFile, target, true, true);
checkResult(target, 4, "Ufile3", "Ufile4", "Ufile5", "Udir1");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test
public void testMultiDirTargetMissing() {
try {
addEntries(listFile, "multifile", "singledir");
createFiles("multifile/file3", "multifile/file4", "multifile/file5");
mkdirs(root + "/singledir/dir1");
runTest(listFile, target, false, false);
checkResult(target, 2, "multifile/file3", "multifile/file4",
"multifile/file5", "singledir/dir1");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test
public void testUpdateMultiDirTargetMissing() {
try {
addEntries(listFile, "multifile", "singledir");
createFiles("multifile/file3", "multifile/file4", "multifile/file5");
mkdirs(root + "/singledir/dir1");
runTest(listFile, target, false, true);
checkResult(target, 4, "file3", "file4", "file5", "dir1");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
}
}
@Test
public void testGlobTargetMissingSingleLevel() {
try {
Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(),
fs.getWorkingDirectory());
addEntries(listFile, "*");
createFiles("multifile/file3", "multifile/file4", "multifile/file5");
createFiles("singledir/dir2/file6");
runTest(listFile, target, false, false);
checkResult(target, 2, "multifile/file3", "multifile/file4", "multifile/file5",
"singledir/dir2/file6");
} catch (IOException e) {
LOG.error("Exception encountered while testing distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
TestDistCpUtils.delete(fs, "target/tmp1");
}
}
@Test
public void testUpdateGlobTargetMissingSingleLevel() {
try {
Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(),
fs.getWorkingDirectory());
addEntries(listFile, "*");
createFiles("multifile/file3", "multifile/file4", "multifile/file5");
createFiles("singledir/dir2/file6");
runTest(listFile, target, false, true);
checkResult(target, 4, "file3", "file4", "file5", "dir2/file6");
} catch (IOException e) {
LOG.error("Exception encountered while running distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
TestDistCpUtils.delete(fs, "target/tmp1");
}
}
@Test
public void testGlobTargetMissingMultiLevel() {
try {
Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(),
fs.getWorkingDirectory());
addEntries(listFile, "*/*");
createFiles("multifile/file3", "multifile/file4", "multifile/file5");
createFiles("singledir1/dir3/file7", "singledir1/dir3/file8",
"singledir1/dir3/file9");
runTest(listFile, target, false, false);
checkResult(target, 4, "file3", "file4", "file5",
"dir3/file7", "dir3/file8", "dir3/file9");
} catch (IOException e) {
LOG.error("Exception encountered while running distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
TestDistCpUtils.delete(fs, "target/tmp1");
}
}
@Test
public void testUpdateGlobTargetMissingMultiLevel() {
try {
Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(),
fs.getWorkingDirectory());
addEntries(listFile, "*/*");
createFiles("multifile/file3", "multifile/file4", "multifile/file5");
createFiles("singledir1/dir3/file7", "singledir1/dir3/file8",
"singledir1/dir3/file9");
runTest(listFile, target, false, true);
checkResult(target, 6, "file3", "file4", "file5",
"file7", "file8", "file9");
} catch (IOException e) {
LOG.error("Exception encountered while running distcp", e);
Assert.fail("distcp failure");
} finally {
TestDistCpUtils.delete(fs, root);
TestDistCpUtils.delete(fs, "target/tmp1");
}
}
private void addEntries(Path listFile, String... entries) throws IOException {
OutputStream out = fs.create(listFile);
try {
for (String entry : entries){
out.write((root + "/" + entry).getBytes());
out.write("\n".getBytes());
}
} finally {
out.close();
}
}
private void createFiles(String... entries) throws IOException {
String e;
for (String entry : entries){
if ((new Path(entry)).isAbsolute())
{
e = entry;
}
else
{
e = root + "/" + entry;
}
OutputStream out = fs.create(new Path(e));
try {
out.write((e).getBytes());
out.write("\n".getBytes());
} finally {
out.close();
}
}
}
private void mkdirs(String... entries) throws IOException {
for (String entry : entries){
fs.mkdirs(new Path(entry));
}
}
private void runTest(Path listFile, Path target, boolean targetExists,
boolean sync) throws IOException {
DistCpOptions options = new DistCpOptions(listFile, target);
options.setSyncFolder(sync);
options.setTargetPathExists(targetExists);
try {
new DistCp(getConf(), options).execute();
} catch (Exception e) {
LOG.error("Exception encountered ", e);
throw new IOException(e);
}
}
private void checkResult(Path target, int count, String... relPaths) throws IOException {
Assert.assertEquals(count, fs.listStatus(target).length);
if (relPaths == null || relPaths.length == 0) {
Assert.assertTrue(target.toString(), fs.exists(target));
return;
}
for (String relPath : relPaths) {
Assert.assertTrue(new Path(target, relPath).toString(), fs.exists(new Path(target, relPath)));
}
}
}
| 14,599 | 28.918033 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestExternalCall.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.JobSubmissionFiles;
import org.apache.hadoop.tools.util.TestDistCpUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.io.OutputStream;
import java.security.Permission;
public class TestExternalCall {
private static final Log LOG = LogFactory.getLog(TestExternalCall.class);
private static FileSystem fs;
private static String root;
private static Configuration getConf() {
Configuration conf = new Configuration();
conf.set("fs.default.name", "file:///");
conf.set("mapred.job.tracker", "local");
return conf;
}
@Before
public void setup() {
securityManager = System.getSecurityManager();
System.setSecurityManager(new NoExitSecurityManager());
try {
fs = FileSystem.get(getConf());
root = new Path("target/tmp").makeQualified(fs.getUri(),
fs.getWorkingDirectory()).toString();
TestDistCpUtils.delete(fs, root);
} catch (IOException e) {
LOG.error("Exception encountered ", e);
}
}
@After
public void tearDown() {
System.setSecurityManager(securityManager);
}
/**
   * test methods run and execute of DistCp class. simple copy of a file
* @throws Exception
*/
@Test
public void testCleanup() throws Exception {
Configuration conf = getConf();
Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf),
conf);
stagingDir.getFileSystem(conf).mkdirs(stagingDir);
Path soure = createFile("tmp.txt");
Path target = createFile("target.txt");
DistCp distcp = new DistCp(conf, null);
String[] arg = { soure.toString(), target.toString() };
distcp.run(arg);
Assert.assertTrue(fs.exists(target));
}
private Path createFile(String fname) throws IOException {
Path result = new Path(root + "/" + fname);
OutputStream out = fs.create(result);
try {
out.write((root + "/" + fname).getBytes());
out.write("\n".getBytes());
} finally {
out.close();
}
return result;
}
/**
   * test main method of DistCp. The method is expected to call System.exit();
   * the NoExitSecurityManager installed in setup() turns that exit into an
   * ExitException so the test can verify the exit status.
*
*/
@Test
public void testCleanupTestViaToolRunner() throws IOException, InterruptedException {
Configuration conf = getConf();
Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
stagingDir.getFileSystem(conf).mkdirs(stagingDir);
Path soure = createFile("tmp.txt");
Path target = createFile("target.txt");
try {
String[] arg = {target.toString(),soure.toString()};
DistCp.main(arg);
Assert.fail();
} catch (ExitException t) {
Assert.assertTrue(fs.exists(target));
Assert.assertEquals(t.status, 0);
Assert.assertEquals(
stagingDir.getFileSystem(conf).listStatus(stagingDir).length, 0);
}
}
private SecurityManager securityManager;
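  // Test-only exception that records the status passed to System.exit() instead of terminating the JVM.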
protected static class ExitException extends SecurityException {
private static final long serialVersionUID = -1982617086752946683L;
public final int status;
public ExitException(int status) {
super("There is no escape!");
this.status = status;
}
}
private static class NoExitSecurityManager extends SecurityManager {
@Override
public void checkPermission(Permission perm) {
// allow anything.
}
@Override
public void checkPermission(Permission perm, Object context) {
// allow anything.
}
@Override
public void checkExit(int status) {
super.checkExit(status);
throw new ExitException(status);
}
}
}
| 4,744 | 27.413174 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/StubContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.apache.hadoop.mapreduce.lib.map.WrappedMapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.conf.Configuration;
import java.util.List;
import java.util.ArrayList;
import java.io.IOException;
public class StubContext {
private StubStatusReporter reporter = new StubStatusReporter();
private RecordReader<Text, CopyListingFileStatus> reader;
private StubInMemoryWriter writer = new StubInMemoryWriter();
private Mapper<Text, CopyListingFileStatus, Text, Text>.Context mapperContext;
public StubContext(Configuration conf,
RecordReader<Text, CopyListingFileStatus> reader, int taskId)
throws IOException, InterruptedException {
WrappedMapper<Text, CopyListingFileStatus, Text, Text> wrappedMapper
= new WrappedMapper<Text, CopyListingFileStatus, Text, Text>();
MapContextImpl<Text, CopyListingFileStatus, Text, Text> contextImpl
= new MapContextImpl<Text, CopyListingFileStatus, Text, Text>(conf,
getTaskAttemptID(taskId), reader, writer,
null, reporter, null);
this.reader = reader;
this.mapperContext = wrappedMapper.getMapContext(contextImpl);
}
public Mapper<Text, CopyListingFileStatus, Text, Text>.Context getContext() {
return mapperContext;
}
public StatusReporter getReporter() {
return reporter;
}
public RecordReader<Text, CopyListingFileStatus> getReader() {
return reader;
}
public StubInMemoryWriter getWriter() {
return writer;
}
public static class StubStatusReporter extends StatusReporter {
private Counters counters = new Counters();
public StubStatusReporter() {
/*
final CounterGroup counterGroup
= new CounterGroup("FileInputFormatCounters",
"FileInputFormatCounters");
counterGroup.addCounter(new Counter("BYTES_READ",
"BYTES_READ",
0));
counters.addGroup(counterGroup);
*/
}
@Override
public Counter getCounter(Enum<?> name) {
return counters.findCounter(name);
}
@Override
public Counter getCounter(String group, String name) {
return counters.findCounter(group, name);
}
@Override
public void progress() {}
@Override
public float getProgress() {
return 0F;
}
@Override
public void setStatus(String status) {}
}
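  /**
   * RecordWriter that keeps emitted keys and values in memory so tests can inspect them.
   */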
public static class StubInMemoryWriter extends RecordWriter<Text, Text> {
List<Text> keys = new ArrayList<Text>();
List<Text> values = new ArrayList<Text>();
@Override
public void write(Text key, Text value) throws IOException, InterruptedException {
keys.add(key);
values.add(value);
}
@Override
public void close(TaskAttemptContext context) throws IOException, InterruptedException {
}
public List<Text> keys() {
return keys;
}
public List<Text> values() {
return values;
}
}
public static TaskAttemptID getTaskAttemptID(int taskId) {
return new TaskAttemptID("", 0, TaskType.MAP, taskId, 0);
}
}
| 4,135 | 28.542857 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.util;
import java.io.IOException;
import java.io.OutputStream;
import java.util.EnumSet;
import java.util.Random;
import java.util.Stack;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.tools.CopyListingFileStatus;
import org.apache.hadoop.tools.DistCpOptionSwitch;
import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestDistCpUtils {
private static final Log LOG = LogFactory.getLog(TestDistCpUtils.class);
private static final Configuration config = new Configuration();
private static MiniDFSCluster cluster;
private static final FsPermission fullPerm = new FsPermission((short) 777);
private static final FsPermission almostFullPerm = new FsPermission((short) 666);
private static final FsPermission noPerm = new FsPermission((short) 0);
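  // Source and destination get deliberately different permissions, owners, times and replication
  // so each testPreserve* case can tell which attributes DistCpUtils.preserve() actually copied.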
@BeforeClass
public static void create() throws IOException {
cluster = new MiniDFSCluster.Builder(config)
.numDataNodes(1)
.format(true)
.build();
}
@AfterClass
public static void destroy() {
if (cluster != null) {
cluster.shutdown();
}
}
@Test
public void testGetRelativePathRoot() {
Path root = new Path("/");
Path child = new Path("/a");
Assert.assertEquals(DistCpUtils.getRelativePath(root, child), "/a");
}
@Test
public void testGetRelativePath() {
Path root = new Path("/tmp/abc");
Path child = new Path("/tmp/abc/xyz/file");
Assert.assertEquals(DistCpUtils.getRelativePath(root, child), "/xyz/file");
}
@Test
public void testPackAttributes() {
EnumSet<FileAttribute> attributes = EnumSet.noneOf(FileAttribute.class);
Assert.assertEquals(DistCpUtils.packAttributes(attributes), "");
attributes.add(FileAttribute.REPLICATION);
Assert.assertEquals(DistCpUtils.packAttributes(attributes), "R");
attributes.add(FileAttribute.BLOCKSIZE);
Assert.assertEquals(DistCpUtils.packAttributes(attributes), "RB");
attributes.add(FileAttribute.USER);
attributes.add(FileAttribute.CHECKSUMTYPE);
Assert.assertEquals(DistCpUtils.packAttributes(attributes), "RBUC");
attributes.add(FileAttribute.GROUP);
Assert.assertEquals(DistCpUtils.packAttributes(attributes), "RBUGC");
attributes.add(FileAttribute.PERMISSION);
Assert.assertEquals(DistCpUtils.packAttributes(attributes), "RBUGPC");
attributes.add(FileAttribute.TIMES);
Assert.assertEquals(DistCpUtils.packAttributes(attributes), "RBUGPCT");
}
  @Test
  public void testUnpackAttributes() {
EnumSet<FileAttribute> attributes = EnumSet.allOf(FileAttribute.class);
Assert.assertEquals(attributes, DistCpUtils.unpackAttributes("RCBUGPAXT"));
attributes.remove(FileAttribute.REPLICATION);
attributes.remove(FileAttribute.CHECKSUMTYPE);
attributes.remove(FileAttribute.ACL);
attributes.remove(FileAttribute.XATTR);
Assert.assertEquals(attributes, DistCpUtils.unpackAttributes("BUGPT"));
attributes.remove(FileAttribute.TIMES);
Assert.assertEquals(attributes, DistCpUtils.unpackAttributes("BUGP"));
attributes.remove(FileAttribute.BLOCKSIZE);
Assert.assertEquals(attributes, DistCpUtils.unpackAttributes("UGP"));
attributes.remove(FileAttribute.GROUP);
Assert.assertEquals(attributes, DistCpUtils.unpackAttributes("UP"));
attributes.remove(FileAttribute.USER);
Assert.assertEquals(attributes, DistCpUtils.unpackAttributes("P"));
attributes.remove(FileAttribute.PERMISSION);
Assert.assertEquals(attributes, DistCpUtils.unpackAttributes(""));
}
@Test
public void testPreserveDefaults() throws IOException {
FileSystem fs = FileSystem.get(config);
// preserve replication, block size, user, group, permission,
// checksum type and timestamps
EnumSet<FileAttribute> attributes =
DistCpUtils.unpackAttributes(
DistCpOptionSwitch.PRESERVE_STATUS_DEFAULT.substring(1));
Path dst = new Path("/tmp/dest2");
Path src = new Path("/tmp/src2");
createFile(fs, src);
createFile(fs, dst);
fs.setPermission(src, fullPerm);
fs.setOwner(src, "somebody", "somebody-group");
fs.setTimes(src, 0, 0);
fs.setReplication(src, (short) 1);
fs.setPermission(dst, noPerm);
fs.setOwner(dst, "nobody", "nobody-group");
fs.setTimes(dst, 100, 100);
fs.setReplication(dst, (short) 2);
CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));
// FileStatus.equals only compares path field, must explicitly compare all fields
Assert.assertTrue(srcStatus.getPermission().equals(dstStatus.getPermission()));
Assert.assertTrue(srcStatus.getOwner().equals(dstStatus.getOwner()));
Assert.assertTrue(srcStatus.getGroup().equals(dstStatus.getGroup()));
Assert.assertTrue(srcStatus.getAccessTime() == dstStatus.getAccessTime());
Assert.assertTrue(srcStatus.getModificationTime() == dstStatus.getModificationTime());
Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication());
}
@Test
public void testPreserveNothingOnDirectory() throws IOException {
FileSystem fs = FileSystem.get(config);
EnumSet<FileAttribute> attributes = EnumSet.noneOf(FileAttribute.class);
Path dst = new Path("/tmp/abc");
Path src = new Path("/tmp/src");
createDirectory(fs, src);
createDirectory(fs, dst);
fs.setPermission(src, fullPerm);
fs.setOwner(src, "somebody", "somebody-group");
fs.setTimes(src, 0, 0);
fs.setPermission(dst, noPerm);
fs.setOwner(dst, "nobody", "nobody-group");
fs.setTimes(dst, 100, 100);
CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));
// FileStatus.equals only compares path field, must explicitly compare all fields
Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
Assert.assertTrue(dstStatus.getAccessTime() == 100);
Assert.assertTrue(dstStatus.getModificationTime() == 100);
Assert.assertTrue(dstStatus.getReplication() == 0);
}
@Test
public void testPreservePermissionOnDirectory() throws IOException {
FileSystem fs = FileSystem.get(config);
EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.PERMISSION);
Path dst = new Path("/tmp/abc");
Path src = new Path("/tmp/src");
createDirectory(fs, src);
createDirectory(fs, dst);
fs.setPermission(src, fullPerm);
fs.setOwner(src, "somebody", "somebody-group");
fs.setPermission(dst, noPerm);
fs.setOwner(dst, "nobody", "nobody-group");
CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));
// FileStatus.equals only compares path field, must explicitly compare all fields
Assert.assertTrue(srcStatus.getPermission().equals(dstStatus.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
}
@Test
public void testPreserveGroupOnDirectory() throws IOException {
FileSystem fs = FileSystem.get(config);
EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.GROUP);
Path dst = new Path("/tmp/abc");
Path src = new Path("/tmp/src");
createDirectory(fs, src);
createDirectory(fs, dst);
fs.setPermission(src, fullPerm);
fs.setOwner(src, "somebody", "somebody-group");
fs.setPermission(dst, noPerm);
fs.setOwner(dst, "nobody", "nobody-group");
CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));
// FileStatus.equals only compares path field, must explicitly compare all fields
Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
Assert.assertTrue(srcStatus.getGroup().equals(dstStatus.getGroup()));
}
@Test
public void testPreserveUserOnDirectory() throws IOException {
FileSystem fs = FileSystem.get(config);
EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.USER);
Path dst = new Path("/tmp/abc");
Path src = new Path("/tmp/src");
createDirectory(fs, src);
createDirectory(fs, dst);
fs.setPermission(src, fullPerm);
fs.setOwner(src, "somebody", "somebody-group");
fs.setPermission(dst, noPerm);
fs.setOwner(dst, "nobody", "nobody-group");
CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));
// FileStatus.equals only compares path field, must explicitly compare all fields
Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
Assert.assertTrue(srcStatus.getOwner().equals(dstStatus.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
}
@Test
public void testPreserveReplicationOnDirectory() throws IOException {
FileSystem fs = FileSystem.get(config);
EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.REPLICATION);
Path dst = new Path("/tmp/abc");
Path src = new Path("/tmp/src");
createDirectory(fs, src);
createDirectory(fs, dst);
fs.setPermission(src, fullPerm);
fs.setOwner(src, "somebody", "somebody-group");
fs.setReplication(src, (short) 1);
fs.setPermission(dst, noPerm);
fs.setOwner(dst, "nobody", "nobody-group");
fs.setReplication(dst, (short) 2);
CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));
// FileStatus.equals only compares path field, must explicitly compare all fields
Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
// Replication shouldn't apply to dirs so this should still be 0 == 0
Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication());
}
@Test
public void testPreserveTimestampOnDirectory() throws IOException {
FileSystem fs = FileSystem.get(config);
EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.TIMES);
Path dst = new Path("/tmp/abc");
Path src = new Path("/tmp/src");
createDirectory(fs, src);
createDirectory(fs, dst);
fs.setPermission(src, fullPerm);
fs.setOwner(src, "somebody", "somebody-group");
fs.setTimes(src, 0, 0);
fs.setPermission(dst, noPerm);
fs.setOwner(dst, "nobody", "nobody-group");
fs.setTimes(dst, 100, 100);
CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));
// FileStatus.equals only compares path field, must explicitly compare all fields
Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
Assert.assertTrue(srcStatus.getAccessTime() == dstStatus.getAccessTime());
Assert.assertTrue(srcStatus.getModificationTime() == dstStatus.getModificationTime());
}
@Test
public void testPreserveNothingOnFile() throws IOException {
FileSystem fs = FileSystem.get(config);
EnumSet<FileAttribute> attributes = EnumSet.noneOf(FileAttribute.class);
Path dst = new Path("/tmp/dest2");
Path src = new Path("/tmp/src2");
createFile(fs, src);
createFile(fs, dst);
fs.setPermission(src, fullPerm);
fs.setOwner(src, "somebody", "somebody-group");
fs.setTimes(src, 0, 0);
fs.setReplication(src, (short) 1);
fs.setPermission(dst, noPerm);
fs.setOwner(dst, "nobody", "nobody-group");
fs.setTimes(dst, 100, 100);
fs.setReplication(dst, (short) 2);
CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));
// FileStatus.equals only compares path field, must explicitly compare all fields
Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
@Test
public void testPreservePermissionOnFile() throws IOException {
FileSystem fs = FileSystem.get(config);
EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.PERMISSION);
Path dst = new Path("/tmp/dest2");
Path src = new Path("/tmp/src2");
createFile(fs, src);
createFile(fs, dst);
fs.setPermission(src, fullPerm);
fs.setOwner(src, "somebody", "somebody-group");
fs.setTimes(src, 0, 0);
fs.setReplication(src, (short) 1);
fs.setPermission(dst, noPerm);
fs.setOwner(dst, "nobody", "nobody-group");
fs.setTimes(dst, 100, 100);
fs.setReplication(dst, (short) 2);
CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));
// FileStatus.equals only compares path field, must explicitly compare all fields
Assert.assertTrue(srcStatus.getPermission().equals(dstStatus.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
@Test
public void testPreserveGroupOnFile() throws IOException {
FileSystem fs = FileSystem.get(config);
EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.GROUP);
Path dst = new Path("/tmp/dest2");
Path src = new Path("/tmp/src2");
createFile(fs, src);
createFile(fs, dst);
fs.setPermission(src, fullPerm);
fs.setOwner(src, "somebody", "somebody-group");
fs.setTimes(src, 0, 0);
fs.setReplication(src, (short) 1);
fs.setPermission(dst, noPerm);
fs.setOwner(dst, "nobody", "nobody-group");
fs.setTimes(dst, 100, 100);
fs.setReplication(dst, (short) 2);
CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));
// FileStatus.equals only compares path field, must explicitly compare all fields
Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
Assert.assertTrue(srcStatus.getGroup().equals(dstStatus.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
@Test
public void testPreserveUserOnFile() throws IOException {
FileSystem fs = FileSystem.get(config);
EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.USER);
Path dst = new Path("/tmp/dest2");
Path src = new Path("/tmp/src2");
createFile(fs, src);
createFile(fs, dst);
fs.setPermission(src, fullPerm);
fs.setOwner(src, "somebody", "somebody-group");
fs.setTimes(src, 0, 0);
fs.setReplication(src, (short) 1);
fs.setPermission(dst, noPerm);
fs.setOwner(dst, "nobody", "nobody-group");
fs.setTimes(dst, 100, 100);
fs.setReplication(dst, (short) 2);
CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));
// FileStatus.equals only compares path field, must explicitly compare all fields
Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
Assert.assertTrue(srcStatus.getOwner().equals(dstStatus.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
@Test
public void testPreserveReplicationOnFile() throws IOException {
FileSystem fs = FileSystem.get(config);
EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.REPLICATION);
Path dst = new Path("/tmp/dest2");
Path src = new Path("/tmp/src2");
createFile(fs, src);
createFile(fs, dst);
fs.setPermission(src, fullPerm);
fs.setOwner(src, "somebody", "somebody-group");
fs.setTimes(src, 0, 0);
fs.setReplication(src, (short) 1);
fs.setPermission(dst, noPerm);
fs.setOwner(dst, "nobody", "nobody-group");
fs.setTimes(dst, 100, 100);
fs.setReplication(dst, (short) 2);
CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));
// FileStatus.equals only compares path field, must explicitly compare all fields
Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication());
}
@Test
public void testPreserveTimestampOnFile() throws IOException {
FileSystem fs = FileSystem.get(config);
EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.TIMES);
Path dst = new Path("/tmp/dest2");
Path src = new Path("/tmp/src2");
createFile(fs, src);
createFile(fs, dst);
fs.setPermission(src, fullPerm);
fs.setOwner(src, "somebody", "somebody-group");
fs.setTimes(src, 0, 0);
fs.setReplication(src, (short) 1);
fs.setPermission(dst, noPerm);
fs.setOwner(dst, "nobody", "nobody-group");
fs.setTimes(dst, 100, 100);
fs.setReplication(dst, (short) 2);
CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));
// FileStatus.equals only compares path field, must explicitly compare all fields
Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
Assert.assertTrue(srcStatus.getAccessTime() == dstStatus.getAccessTime());
Assert.assertTrue(srcStatus.getModificationTime() == dstStatus.getModificationTime());
Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
@Test
public void testPreserveOnFileUpwardRecursion() throws IOException {
FileSystem fs = FileSystem.get(config);
EnumSet<FileAttribute> attributes = EnumSet.allOf(FileAttribute.class);
// Remove ACL because tests run with dfs.namenode.acls.enabled false
attributes.remove(FileAttribute.ACL);
Path src = new Path("/tmp/src2");
Path f0 = new Path("/f0");
Path f1 = new Path("/d1/f1");
Path f2 = new Path("/d1/d2/f2");
Path d1 = new Path("/d1/");
Path d2 = new Path("/d1/d2/");
createFile(fs, src);
createFile(fs, f0);
createFile(fs, f1);
createFile(fs, f2);
fs.setPermission(src, almostFullPerm);
fs.setOwner(src, "somebody", "somebody-group");
fs.setTimes(src, 0, 0);
fs.setReplication(src, (short) 1);
fs.setPermission(d1, fullPerm);
fs.setOwner(d1, "anybody", "anybody-group");
fs.setTimes(d1, 400, 400);
fs.setReplication(d1, (short) 3);
fs.setPermission(d2, fullPerm);
fs.setOwner(d2, "anybody", "anybody-group");
fs.setTimes(d2, 300, 300);
fs.setReplication(d2, (short) 3);
fs.setPermission(f0, fullPerm);
fs.setOwner(f0, "anybody", "anybody-group");
fs.setTimes(f0, 200, 200);
fs.setReplication(f0, (short) 3);
fs.setPermission(f1, fullPerm);
fs.setOwner(f1, "anybody", "anybody-group");
fs.setTimes(f1, 200, 200);
fs.setReplication(f1, (short) 3);
fs.setPermission(f2, fullPerm);
fs.setOwner(f2, "anybody", "anybody-group");
fs.setTimes(f2, 200, 200);
fs.setReplication(f2, (short) 3);
CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
DistCpUtils.preserve(fs, f2, srcStatus, attributes, false);
cluster.triggerHeartbeats();
// FileStatus.equals only compares path field, must explicitly compare all fields
// attributes of src -> f2 ? should be yes
CopyListingFileStatus f2Status = new CopyListingFileStatus(fs.getFileStatus(f2));
Assert.assertTrue(srcStatus.getPermission().equals(f2Status.getPermission()));
Assert.assertTrue(srcStatus.getOwner().equals(f2Status.getOwner()));
Assert.assertTrue(srcStatus.getGroup().equals(f2Status.getGroup()));
Assert.assertTrue(srcStatus.getAccessTime() == f2Status.getAccessTime());
Assert.assertTrue(srcStatus.getModificationTime() == f2Status.getModificationTime());
Assert.assertTrue(srcStatus.getReplication() == f2Status.getReplication());
// attributes of src -> f1 ? should be no
CopyListingFileStatus f1Status = new CopyListingFileStatus(fs.getFileStatus(f1));
Assert.assertFalse(srcStatus.getPermission().equals(f1Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(f1Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(f1Status.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == f1Status.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == f1Status.getModificationTime());
Assert.assertFalse(srcStatus.getReplication() == f1Status.getReplication());
// attributes of src -> f0 ? should be no
CopyListingFileStatus f0Status = new CopyListingFileStatus(fs.getFileStatus(f0));
Assert.assertFalse(srcStatus.getPermission().equals(f0Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(f0Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(f0Status.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == f0Status.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == f0Status.getModificationTime());
Assert.assertFalse(srcStatus.getReplication() == f0Status.getReplication());
// attributes of src -> d2 ? should be no
CopyListingFileStatus d2Status = new CopyListingFileStatus(fs.getFileStatus(d2));
Assert.assertFalse(srcStatus.getPermission().equals(d2Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(d2Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(d2Status.getGroup()));
Assert.assertTrue(d2Status.getAccessTime() == 300);
Assert.assertTrue(d2Status.getModificationTime() == 300);
Assert.assertFalse(srcStatus.getReplication() == d2Status.getReplication());
// attributes of src -> d1 ? should be no
CopyListingFileStatus d1Status = new CopyListingFileStatus(fs.getFileStatus(d1));
Assert.assertFalse(srcStatus.getPermission().equals(d1Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(d1Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(d1Status.getGroup()));
Assert.assertTrue(d1Status.getAccessTime() == 400);
Assert.assertTrue(d1Status.getModificationTime() == 400);
Assert.assertFalse(srcStatus.getReplication() == d1Status.getReplication());
}
@Test
public void testPreserveOnDirectoryUpwardRecursion() throws IOException {
FileSystem fs = FileSystem.get(config);
EnumSet<FileAttribute> attributes = EnumSet.allOf(FileAttribute.class);
// Remove ACL because tests run with dfs.namenode.acls.enabled false
attributes.remove(FileAttribute.ACL);
Path src = new Path("/tmp/src2");
Path f0 = new Path("/f0");
Path f1 = new Path("/d1/f1");
Path f2 = new Path("/d1/d2/f2");
Path d1 = new Path("/d1/");
Path d2 = new Path("/d1/d2/");
createFile(fs, src);
createFile(fs, f0);
createFile(fs, f1);
createFile(fs, f2);
fs.setPermission(src, almostFullPerm);
fs.setOwner(src, "somebody", "somebody-group");
fs.setTimes(src, 0, 0);
fs.setReplication(src, (short) 1);
fs.setPermission(d1, fullPerm);
fs.setOwner(d1, "anybody", "anybody-group");
fs.setTimes(d1, 400, 400);
fs.setReplication(d1, (short) 3);
fs.setPermission(d2, fullPerm);
fs.setOwner(d2, "anybody", "anybody-group");
fs.setTimes(d2, 300, 300);
fs.setReplication(d2, (short) 3);
fs.setPermission(f0, fullPerm);
fs.setOwner(f0, "anybody", "anybody-group");
fs.setTimes(f0, 200, 200);
fs.setReplication(f0, (short) 3);
fs.setPermission(f1, fullPerm);
fs.setOwner(f1, "anybody", "anybody-group");
fs.setTimes(f1, 200, 200);
fs.setReplication(f1, (short) 3);
fs.setPermission(f2, fullPerm);
fs.setOwner(f2, "anybody", "anybody-group");
fs.setTimes(f2, 200, 200);
fs.setReplication(f2, (short) 3);
CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
DistCpUtils.preserve(fs, d2, srcStatus, attributes, false);
cluster.triggerHeartbeats();
// FileStatus.equals only compares path field, must explicitly compare all fields
// attributes of src -> d2 ? should be yes
CopyListingFileStatus d2Status = new CopyListingFileStatus(fs.getFileStatus(d2));
Assert.assertTrue(srcStatus.getPermission().equals(d2Status.getPermission()));
Assert.assertTrue(srcStatus.getOwner().equals(d2Status.getOwner()));
Assert.assertTrue(srcStatus.getGroup().equals(d2Status.getGroup()));
Assert.assertTrue(srcStatus.getAccessTime() == d2Status.getAccessTime());
Assert.assertTrue(srcStatus.getModificationTime() == d2Status.getModificationTime());
Assert.assertTrue(srcStatus.getReplication() != d2Status.getReplication());
// attributes of src -> d1 ? should be no
CopyListingFileStatus d1Status = new CopyListingFileStatus(fs.getFileStatus(d1));
Assert.assertFalse(srcStatus.getPermission().equals(d1Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(d1Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(d1Status.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == d1Status.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == d1Status.getModificationTime());
Assert.assertTrue(srcStatus.getReplication() != d1Status.getReplication());
// attributes of src -> f2 ? should be no
CopyListingFileStatus f2Status = new CopyListingFileStatus(fs.getFileStatus(f2));
Assert.assertFalse(srcStatus.getPermission().equals(f2Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(f2Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(f2Status.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == f2Status.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == f2Status.getModificationTime());
Assert.assertFalse(srcStatus.getReplication() == f2Status.getReplication());
// attributes of src -> f1 ? should be no
CopyListingFileStatus f1Status = new CopyListingFileStatus(fs.getFileStatus(f1));
Assert.assertFalse(srcStatus.getPermission().equals(f1Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(f1Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(f1Status.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == f1Status.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == f1Status.getModificationTime());
Assert.assertFalse(srcStatus.getReplication() == f1Status.getReplication());
// attributes of src -> f0 ? should be no
CopyListingFileStatus f0Status = new CopyListingFileStatus(fs.getFileStatus(f0));
Assert.assertFalse(srcStatus.getPermission().equals(f0Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(f0Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(f0Status.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == f0Status.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == f0Status.getModificationTime());
Assert.assertFalse(srcStatus.getReplication() == f0Status.getReplication());
}
@Test
public void testPreserveOnFileDownwardRecursion() throws IOException {
FileSystem fs = FileSystem.get(config);
EnumSet<FileAttribute> attributes = EnumSet.allOf(FileAttribute.class);
// Remove ACL because tests run with dfs.namenode.acls.enabled false
attributes.remove(FileAttribute.ACL);
Path src = new Path("/tmp/src2");
Path f0 = new Path("/f0");
Path f1 = new Path("/d1/f1");
Path f2 = new Path("/d1/d2/f2");
Path d1 = new Path("/d1/");
Path d2 = new Path("/d1/d2/");
createFile(fs, src);
createFile(fs, f0);
createFile(fs, f1);
createFile(fs, f2);
fs.setPermission(src, almostFullPerm);
fs.setOwner(src, "somebody", "somebody-group");
fs.setTimes(src, 0, 0);
fs.setReplication(src, (short) 1);
fs.setPermission(d1, fullPerm);
fs.setOwner(d1, "anybody", "anybody-group");
fs.setTimes(d1, 400, 400);
fs.setReplication(d1, (short) 3);
fs.setPermission(d2, fullPerm);
fs.setOwner(d2, "anybody", "anybody-group");
fs.setTimes(d2, 300, 300);
fs.setReplication(d2, (short) 3);
fs.setPermission(f0, fullPerm);
fs.setOwner(f0, "anybody", "anybody-group");
fs.setTimes(f0, 200, 200);
fs.setReplication(f0, (short) 3);
fs.setPermission(f1, fullPerm);
fs.setOwner(f1, "anybody", "anybody-group");
fs.setTimes(f1, 200, 200);
fs.setReplication(f1, (short) 3);
fs.setPermission(f2, fullPerm);
fs.setOwner(f2, "anybody", "anybody-group");
fs.setTimes(f2, 200, 200);
fs.setReplication(f2, (short) 3);
CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
DistCpUtils.preserve(fs, f0, srcStatus, attributes, false);
cluster.triggerHeartbeats();
// FileStatus.equals only compares path field, must explicitly compare all fields
// attributes of src -> f0 ? should be yes
CopyListingFileStatus f0Status = new CopyListingFileStatus(fs.getFileStatus(f0));
Assert.assertTrue(srcStatus.getPermission().equals(f0Status.getPermission()));
Assert.assertTrue(srcStatus.getOwner().equals(f0Status.getOwner()));
Assert.assertTrue(srcStatus.getGroup().equals(f0Status.getGroup()));
Assert.assertTrue(srcStatus.getAccessTime() == f0Status.getAccessTime());
Assert.assertTrue(srcStatus.getModificationTime() == f0Status.getModificationTime());
Assert.assertTrue(srcStatus.getReplication() == f0Status.getReplication());
// attributes of src -> f1 ? should be no
CopyListingFileStatus f1Status = new CopyListingFileStatus(fs.getFileStatus(f1));
Assert.assertFalse(srcStatus.getPermission().equals(f1Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(f1Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(f1Status.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == f1Status.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == f1Status.getModificationTime());
Assert.assertFalse(srcStatus.getReplication() == f1Status.getReplication());
// attributes of src -> f2 ? should be no
CopyListingFileStatus f2Status = new CopyListingFileStatus(fs.getFileStatus(f2));
Assert.assertFalse(srcStatus.getPermission().equals(f2Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(f2Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(f2Status.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == f2Status.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == f2Status.getModificationTime());
Assert.assertFalse(srcStatus.getReplication() == f2Status.getReplication());
// attributes of src -> d1 ? should be no
CopyListingFileStatus d1Status = new CopyListingFileStatus(fs.getFileStatus(d1));
Assert.assertFalse(srcStatus.getPermission().equals(d1Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(d1Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(d1Status.getGroup()));
Assert.assertTrue(d1Status.getAccessTime() == 400);
Assert.assertTrue(d1Status.getModificationTime() == 400);
Assert.assertFalse(srcStatus.getReplication() == d1Status.getReplication());
// attributes of src -> d2 ? should be no
CopyListingFileStatus d2Status = new CopyListingFileStatus(fs.getFileStatus(d2));
Assert.assertFalse(srcStatus.getPermission().equals(d2Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(d2Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(d2Status.getGroup()));
Assert.assertTrue(d2Status.getAccessTime() == 300);
Assert.assertTrue(d2Status.getModificationTime() == 300);
Assert.assertFalse(srcStatus.getReplication() == d2Status.getReplication());
}
@Test
public void testPreserveOnDirectoryDownwardRecursion() throws IOException {
FileSystem fs = FileSystem.get(config);
EnumSet<FileAttribute> attributes = EnumSet.allOf(FileAttribute.class);
// Remove ACL because tests run with dfs.namenode.acls.enabled false
attributes.remove(FileAttribute.ACL);
Path src = new Path("/tmp/src2");
Path f0 = new Path("/f0");
Path f1 = new Path("/d1/f1");
Path f2 = new Path("/d1/d2/f2");
Path d1 = new Path("/d1/");
Path d2 = new Path("/d1/d2/");
Path root = new Path("/");
createFile(fs, src);
createFile(fs, f0);
createFile(fs, f1);
createFile(fs, f2);
fs.setPermission(src, almostFullPerm);
fs.setOwner(src, "somebody", "somebody-group");
fs.setTimes(src, 0, 0);
fs.setReplication(src, (short) 1);
fs.setPermission(root, fullPerm);
fs.setOwner(root, "anybody", "anybody-group");
fs.setTimes(root, 400, 400);
fs.setReplication(root, (short) 3);
fs.setPermission(d1, fullPerm);
fs.setOwner(d1, "anybody", "anybody-group");
fs.setTimes(d1, 400, 400);
fs.setReplication(d1, (short) 3);
fs.setPermission(d2, fullPerm);
fs.setOwner(d2, "anybody", "anybody-group");
fs.setTimes(d2, 300, 300);
fs.setReplication(d2, (short) 3);
fs.setPermission(f0, fullPerm);
fs.setOwner(f0, "anybody", "anybody-group");
fs.setTimes(f0, 200, 200);
fs.setReplication(f0, (short) 3);
fs.setPermission(f1, fullPerm);
fs.setOwner(f1, "anybody", "anybody-group");
fs.setTimes(f1, 200, 200);
fs.setReplication(f1, (short) 3);
fs.setPermission(f2, fullPerm);
fs.setOwner(f2, "anybody", "anybody-group");
fs.setTimes(f2, 200, 200);
fs.setReplication(f2, (short) 3);
CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
DistCpUtils.preserve(fs, root, srcStatus, attributes, false);
cluster.triggerHeartbeats();
// FileStatus.equals only compares path field, must explicitly compare all fields
// attributes of src -> root ? should be yes
CopyListingFileStatus rootStatus = new CopyListingFileStatus(fs.getFileStatus(root));
Assert.assertTrue(srcStatus.getPermission().equals(rootStatus.getPermission()));
Assert.assertTrue(srcStatus.getOwner().equals(rootStatus.getOwner()));
Assert.assertTrue(srcStatus.getGroup().equals(rootStatus.getGroup()));
Assert.assertTrue(srcStatus.getAccessTime() == rootStatus.getAccessTime());
Assert.assertTrue(srcStatus.getModificationTime() == rootStatus.getModificationTime());
Assert.assertTrue(srcStatus.getReplication() != rootStatus.getReplication());
// attributes of src -> d1 ? should be no
CopyListingFileStatus d1Status = new CopyListingFileStatus(fs.getFileStatus(d1));
Assert.assertFalse(srcStatus.getPermission().equals(d1Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(d1Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(d1Status.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == d1Status.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == d1Status.getModificationTime());
Assert.assertTrue(srcStatus.getReplication() != d1Status.getReplication());
// attributes of src -> d2 ? should be no
CopyListingFileStatus d2Status = new CopyListingFileStatus(fs.getFileStatus(d2));
Assert.assertFalse(srcStatus.getPermission().equals(d2Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(d2Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(d2Status.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == d2Status.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == d2Status.getModificationTime());
Assert.assertTrue(srcStatus.getReplication() != d2Status.getReplication());
// attributes of src -> f0 ? should be no
CopyListingFileStatus f0Status = new CopyListingFileStatus(fs.getFileStatus(f0));
Assert.assertFalse(srcStatus.getPermission().equals(f0Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(f0Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(f0Status.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == f0Status.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == f0Status.getModificationTime());
Assert.assertFalse(srcStatus.getReplication() == f0Status.getReplication());
// attributes of src -> f1 ? should be no
CopyListingFileStatus f1Status = new CopyListingFileStatus(fs.getFileStatus(f1));
Assert.assertFalse(srcStatus.getPermission().equals(f1Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(f1Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(f1Status.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == f1Status.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == f1Status.getModificationTime());
Assert.assertFalse(srcStatus.getReplication() == f1Status.getReplication());
// attributes of src -> f2 ? should be no
CopyListingFileStatus f2Status = new CopyListingFileStatus(fs.getFileStatus(f2));
Assert.assertFalse(srcStatus.getPermission().equals(f2Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(f2Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(f2Status.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == f2Status.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == f2Status.getModificationTime());
Assert.assertFalse(srcStatus.getReplication() == f2Status.getReplication());
}
private static Random rand = new Random();
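  /**
   * Creates a small "newTest/hello/world1..3" directory tree under a randomly
   * named base directory, applies the supplied permission (or the filesystem
   * default) to every directory in it, drops a few empty files inside, and
   * returns the base path so callers can use it as a freshly populated DistCp
   * source or target.
   */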
public static String createTestSetup(FileSystem fs) throws IOException {
return createTestSetup("/tmp1", fs, FsPermission.getDefault());
}
public static String createTestSetup(FileSystem fs,
FsPermission perm) throws IOException {
return createTestSetup("/tmp1", fs, perm);
}
public static String createTestSetup(String baseDir,
FileSystem fs,
FsPermission perm) throws IOException {
String base = getBase(baseDir);
fs.mkdirs(new Path(base + "/newTest/hello/world1"));
fs.mkdirs(new Path(base + "/newTest/hello/world2/newworld"));
fs.mkdirs(new Path(base + "/newTest/hello/world3/oldworld"));
fs.setPermission(new Path(base + "/newTest"), perm);
fs.setPermission(new Path(base + "/newTest/hello"), perm);
fs.setPermission(new Path(base + "/newTest/hello/world1"), perm);
fs.setPermission(new Path(base + "/newTest/hello/world2"), perm);
fs.setPermission(new Path(base + "/newTest/hello/world2/newworld"), perm);
fs.setPermission(new Path(base + "/newTest/hello/world3"), perm);
fs.setPermission(new Path(base + "/newTest/hello/world3/oldworld"), perm);
createFile(fs, new Path(base, "/newTest/1"));
createFile(fs, new Path(base, "/newTest/hello/2"));
createFile(fs, new Path(base, "/newTest/hello/world3/oldworld/3"));
createFile(fs, new Path(base, "/newTest/hello/world2/4"));
return base;
}
private static String getBase(String base) {
String location = String.valueOf(rand.nextLong());
return base + "/" + location;
}
public static void delete(FileSystem fs, String path) {
try {
if (fs != null) {
if (path != null) {
fs.delete(new Path(path), true);
}
}
} catch (IOException e) {
LOG.warn("Exception encountered ", e);
}
}
public static void createFile(FileSystem fs, String filePath) throws IOException {
Path path = new Path(filePath);
createFile(fs, path);
}
/** Creates a new, empty file at filePath and always overwrites */
public static void createFile(FileSystem fs, Path filePath) throws IOException {
OutputStream out = fs.create(filePath, true);
IOUtils.closeStream(out);
}
/** Creates a new, empty directory at dirPath and always overwrites */
public static void createDirectory(FileSystem fs, Path dirPath) throws IOException {
fs.delete(dirPath, true);
boolean created = fs.mkdirs(dirPath);
if (!created) {
LOG.warn("Could not create directory " + dirPath + " this might cause test failures.");
}
}
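  /**
   * Walks the tree under targetBase and asserts that every entry has a
   * counterpart at the same relative path under sourceBase. The check is
   * one-directional, so callers needing a full comparison invoke it twice with
   * the arguments swapped; it returns true when all assertions pass.
   */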
public static boolean checkIfFoldersAreInSync(FileSystem fs, String targetBase, String sourceBase)
throws IOException {
Path base = new Path(targetBase);
Stack<Path> stack = new Stack<Path>();
stack.push(base);
while (!stack.isEmpty()) {
Path file = stack.pop();
if (!fs.exists(file)) continue;
FileStatus[] fStatus = fs.listStatus(file);
if (fStatus == null || fStatus.length == 0) continue;
for (FileStatus status : fStatus) {
if (status.isDirectory()) {
stack.push(status.getPath());
}
Assert.assertTrue(fs.exists(new Path(sourceBase + "/" +
DistCpUtils.getRelativePath(new Path(targetBase), status.getPath()))));
}
}
return true;
}
}
| 46,098 | 41.176578 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestProducerConsumer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.util;
import org.apache.hadoop.tools.util.ProducerConsumer;
import org.apache.hadoop.tools.util.WorkReport;
import org.apache.hadoop.tools.util.WorkRequest;
import org.apache.hadoop.tools.util.WorkRequestProcessor;
import org.junit.Assert;
import org.junit.Test;
import java.lang.Exception;
import java.lang.Integer;
public class TestProducerConsumer {
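  /** Trivial processor that echoes each request item back as a successful report. */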
public class CopyProcessor implements WorkRequestProcessor<Integer, Integer> {
public WorkReport<Integer> processItem(WorkRequest<Integer> workRequest) {
Integer item = new Integer(workRequest.getItem());
return new WorkReport<Integer>(item, 0, true);
}
}
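  /**
   * Processor that deliberately triggers a NullPointerException and returns it
   * wrapped in a failed report, so tests can verify that errors travel back
   * through the queue.
   */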
public class ExceptionProcessor implements WorkRequestProcessor<Integer, Integer> {
@SuppressWarnings("null")
public WorkReport<Integer> processItem(WorkRequest<Integer> workRequest) {
try {
Integer item = null;
item.intValue(); // Throw NULL pointer exception.
// We should never be here (null pointer exception above)
return new WorkReport<Integer>(item, 0, true);
} catch (Exception e) {
Integer item = new Integer(workRequest.getItem());
return new WorkReport<Integer>(item, 1, false, e);
}
}
}
@Test
public void testSimpleProducerConsumer() {
ProducerConsumer<Integer, Integer> worker =
new ProducerConsumer<Integer, Integer>(1);
worker.addWorker(new CopyProcessor());
worker.put(new WorkRequest<Integer>(42));
try {
WorkReport<Integer> report = worker.take();
Assert.assertEquals(42, report.getItem().intValue());
} catch (InterruptedException ie) {
      Assert.fail("Unexpected InterruptedException while waiting for the work report");
}
}
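  // Pushes 2000 requests through ten workers and checks that every request is
  // answered exactly once: the running sum of request items must return to zero
  // and the number of reports must match the number of requests.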
@Test
public void testMultipleProducerConsumer() {
ProducerConsumer<Integer, Integer> workers =
new ProducerConsumer<Integer, Integer>(10);
for (int i = 0; i < 10; i++) {
workers.addWorker(new CopyProcessor());
}
int sum = 0;
int numRequests = 2000;
for (int i = 0; i < numRequests; i++) {
workers.put(new WorkRequest<Integer>(i + 42));
sum += i + 42;
}
int numReports = 0;
while (workers.getWorkCnt() > 0) {
WorkReport<Integer> report = workers.blockingTake();
sum -= report.getItem().intValue();
numReports++;
}
Assert.assertEquals(0, sum);
Assert.assertEquals(numRequests, numReports);
}
@Test
public void testExceptionProducerConsumer() {
ProducerConsumer<Integer, Integer> worker =
new ProducerConsumer<Integer, Integer>(1);
worker.addWorker(new ExceptionProcessor());
worker.put(new WorkRequest<Integer>(42));
try {
WorkReport<Integer> report = worker.take();
Assert.assertEquals(42, report.getItem().intValue());
Assert.assertFalse(report.getSuccess());
Assert.assertNotNull(report.getException());
} catch (InterruptedException ie) {
      Assert.fail("Unexpected InterruptedException while waiting for the work report");
}
}
}
| 3,747 | 33.072727 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestRetriableCommand.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.util;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.junit.Assert;
import org.junit.Test;
import java.util.concurrent.TimeUnit;
public class TestRetriableCommand {
private static class MyRetriableCommand extends RetriableCommand {
private int succeedAfter;
private int retryCount = 0;
public MyRetriableCommand(int succeedAfter) {
super("MyRetriableCommand");
this.succeedAfter = succeedAfter;
}
public MyRetriableCommand(int succeedAfter, RetryPolicy retryPolicy) {
super("MyRetriableCommand", retryPolicy);
this.succeedAfter = succeedAfter;
}
@Override
protected Object doExecute(Object... arguments) throws Exception {
if (++retryCount < succeedAfter)
throw new Exception("Transient failure#" + retryCount);
return 0;
}
}
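  // A command that succeeds on its third attempt passes under the default retry
  // policy, while one that needs five attempts fails unless a policy allowing
  // five retries with a fixed sleep is supplied.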
@Test
public void testRetriableCommand() {
    try {
      new MyRetriableCommand(5).execute(0);
      Assert.fail("Expected the default retry policy to give up before the fifth attempt");
    }
    catch (Exception e) {
      // Expected: the command keeps failing for longer than the default policy retries.
    }
    try {
      new MyRetriableCommand(3).execute(0);
    }
    catch (Exception e) {
      Assert.fail("A command succeeding on its third attempt should pass under the default policy");
    }
    try {
      new MyRetriableCommand(5, RetryPolicies.
          retryUpToMaximumCountWithFixedSleep(5, 0, TimeUnit.MILLISECONDS)).execute(0);
    }
    catch (Exception e) {
      Assert.fail("Five retries with fixed sleep should cover a command that succeeds on its fifth attempt");
    }
}
}
| 2,330 | 27.426829 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/DistCpTestUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.util;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.DistCp;
import org.apache.hadoop.util.ToolRunner;
/**
* Utility class for DistCpTests
*/
public class DistCpTestUtils {
/**
* Asserts the XAttrs returned by getXAttrs for a specific path match an
* expected set of XAttrs.
*
* @param path String path to check
* @param fs FileSystem to use for the path
* @param expectedXAttrs XAttr[] expected xAttrs
* @throws Exception if there is any error
*/
public static void assertXAttrs(Path path, FileSystem fs,
Map<String, byte[]> expectedXAttrs)
throws Exception {
Map<String, byte[]> xAttrs = fs.getXAttrs(path);
assertEquals(path.toString(), expectedXAttrs.size(), xAttrs.size());
Iterator<Entry<String, byte[]>> i = expectedXAttrs.entrySet().iterator();
while (i.hasNext()) {
Entry<String, byte[]> e = i.next();
String name = e.getKey();
byte[] value = e.getValue();
if (value == null) {
assertTrue(xAttrs.containsKey(name) && xAttrs.get(name) == null);
} else {
assertArrayEquals(value, xAttrs.get(name));
}
}
}
/**
* Runs distcp from src to dst, preserving XAttrs. Asserts the
* expected exit code.
*
* @param exitCode expected exit code
* @param src distcp src path
* @param dst distcp destination
* @param options distcp command line options
* @param conf Configuration to use
* @throws Exception if there is any error
*/
public static void assertRunDistCp(int exitCode, String src, String dst,
String options, Configuration conf)
throws Exception {
DistCp distCp = new DistCp(conf, null);
String[] optsArr = options == null ?
new String[] { src, dst } :
new String[] { options, src, dst };
assertEquals(exitCode,
ToolRunner.run(conf, distCp, optsArr));
}
}
| 3,035 | 32.733333 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestThrottledInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.IOUtils;
import org.junit.Assert;
import org.junit.Test;
import java.io.*;
public class TestThrottledInputStream {
private static final Log LOG = LogFactory.getLog(TestThrottledInputStream.class);
private static final int BUFF_SIZE = 1024;
private enum CB {ONE_C, BUFFER, BUFF_OFFSET}
@Test
public void testRead() {
File tmpFile;
File outFile;
try {
tmpFile = createFile(1024);
outFile = createFile();
tmpFile.deleteOnExit();
outFile.deleteOnExit();
long maxBandwidth = copyAndAssert(tmpFile, outFile, 0, 1, -1, CB.BUFFER);
copyAndAssert(tmpFile, outFile, maxBandwidth, 20, 0, CB.BUFFER);
/*
copyAndAssert(tmpFile, outFile, maxBandwidth, 10, 0, CB.BUFFER);
copyAndAssert(tmpFile, outFile, maxBandwidth, 50, 0, CB.BUFFER);
*/
copyAndAssert(tmpFile, outFile, maxBandwidth, 20, 0, CB.BUFF_OFFSET);
/*
copyAndAssert(tmpFile, outFile, maxBandwidth, 10, 0, CB.BUFF_OFFSET);
copyAndAssert(tmpFile, outFile, maxBandwidth, 50, 0, CB.BUFF_OFFSET);
*/
copyAndAssert(tmpFile, outFile, maxBandwidth, 20, 0, CB.ONE_C);
/*
copyAndAssert(tmpFile, outFile, maxBandwidth, 10, 0, CB.ONE_C);
copyAndAssert(tmpFile, outFile, maxBandwidth, 50, 0, CB.ONE_C);
*/
} catch (IOException e) {
LOG.error("Exception encountered ", e);
}
}
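  /**
   * Copies tmpFile to outFile through a ThrottledInputStream (unthrottled when
   * maxBandwidth is 0, otherwise capped at maxBandwidth/factor bytes per
   * second), asserts that the whole file was read and that the observed rate
   * and sleep time are consistent with the cap, and returns the measured
   * bytes-per-second so the first unthrottled run can calibrate the throttled
   * ones.
   */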
private long copyAndAssert(File tmpFile, File outFile,
long maxBandwidth, float factor,
int sleepTime, CB flag) throws IOException {
long bandwidth;
ThrottledInputStream in;
long maxBPS = (long) (maxBandwidth / factor);
if (maxBandwidth == 0) {
in = new ThrottledInputStream(new FileInputStream(tmpFile));
} else {
in = new ThrottledInputStream(new FileInputStream(tmpFile), maxBPS);
}
OutputStream out = new FileOutputStream(outFile);
try {
if (flag == CB.BUFFER) {
copyBytes(in, out, BUFF_SIZE);
} else if (flag == CB.BUFF_OFFSET){
copyBytesWithOffset(in, out, BUFF_SIZE);
} else {
copyByteByByte(in, out);
}
LOG.info(in);
bandwidth = in.getBytesPerSec();
      Assert.assertEquals(tmpFile.length(), in.getTotalBytesRead());
Assert.assertTrue(in.getBytesPerSec() > maxBandwidth / (factor * 1.2));
Assert.assertTrue(in.getTotalSleepTime() > sleepTime || in.getBytesPerSec() <= maxBPS);
} finally {
IOUtils.closeStream(in);
IOUtils.closeStream(out);
}
return bandwidth;
}
private static void copyBytesWithOffset(InputStream in, OutputStream out, int buffSize)
throws IOException {
byte buf[] = new byte[buffSize];
int bytesRead = in.read(buf, 0, buffSize);
while (bytesRead >= 0) {
out.write(buf, 0, bytesRead);
bytesRead = in.read(buf);
}
}
private static void copyByteByByte(InputStream in, OutputStream out)
throws IOException {
int ch = in.read();
while (ch >= 0) {
out.write(ch);
ch = in.read();
}
}
private static void copyBytes(InputStream in, OutputStream out, int buffSize)
throws IOException {
byte buf[] = new byte[buffSize];
int bytesRead = in.read(buf);
while (bytesRead >= 0) {
out.write(buf, 0, bytesRead);
bytesRead = in.read(buf);
}
}
private File createFile(long sizeInKB) throws IOException {
File tmpFile = createFile();
writeToFile(tmpFile, sizeInKB);
return tmpFile;
}
private File createFile() throws IOException {
return File.createTempFile("tmp", "dat");
}
private void writeToFile(File tmpFile, long sizeInKB) throws IOException {
OutputStream out = new FileOutputStream(tmpFile);
try {
byte[] buffer = new byte [1024];
for (long index = 0; index < sizeInKB; index++) {
out.write(buffer);
}
} finally {
IOUtils.closeStream(out);
}
}
}
| 4,856 | 29.740506 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.mapred;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.hadoop.mapreduce.task.JobContextImpl;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.tools.CopyListing;
import org.apache.hadoop.tools.DistCpConstants;
import org.apache.hadoop.tools.DistCpOptions;
import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
import org.apache.hadoop.tools.GlobbedCopyListing;
import org.apache.hadoop.tools.util.TestDistCpUtils;
import org.apache.hadoop.security.Credentials;
import org.junit.*;
import java.io.IOException;
import java.util.*;
public class TestCopyCommitter {
private static final Log LOG = LogFactory.getLog(TestCopyCommitter.class);
private static final Random rand = new Random();
private static final Credentials CREDENTIALS = new Credentials();
public static final int PORT = 39737;
private static Configuration config;
private static MiniDFSCluster cluster;
private static Job getJobForClient() throws IOException {
Job job = Job.getInstance(new Configuration());
job.getConfiguration().set("mapred.job.tracker", "localhost:" + PORT);
job.setInputFormatClass(NullInputFormat.class);
job.setOutputFormatClass(NullOutputFormat.class);
job.setNumReduceTasks(0);
return job;
}
@BeforeClass
public static void create() throws IOException {
config = getJobForClient().getConfiguration();
config.setLong(DistCpConstants.CONF_LABEL_TOTAL_BYTES_TO_BE_COPIED, 0);
cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).format(true)
.build();
}
@AfterClass
public static void destroy() {
if (cluster != null) {
cluster.shutdown();
}
}
@Before
public void createMetaFolder() {
config.set(DistCpConstants.CONF_LABEL_META_FOLDER, "/meta");
Path meta = new Path("/meta");
try {
cluster.getFileSystem().mkdirs(meta);
} catch (IOException e) {
LOG.error("Exception encountered while creating meta folder", e);
Assert.fail("Unable to create meta folder");
}
}
@After
public void cleanupMetaFolder() {
Path meta = new Path("/meta");
try {
if (cluster.getFileSystem().exists(meta)) {
cluster.getFileSystem().delete(meta, true);
Assert.fail("Expected meta folder to be deleted");
}
} catch (IOException e) {
LOG.error("Exception encountered while cleaning up folder", e);
Assert.fail("Unable to clean up meta folder");
}
}
@Test
public void testNoCommitAction() {
TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(),
taskAttemptContext.getTaskAttemptID().getJobID());
try {
OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
committer.commitJob(jobContext);
      Assert.assertEquals("Commit Successful", taskAttemptContext.getStatus());
      //Test for idempotent commit
      committer.commitJob(jobContext);
      Assert.assertEquals("Commit Successful", taskAttemptContext.getStatus());
} catch (IOException e) {
LOG.error("Exception encountered ", e);
Assert.fail("Commit failed");
}
}
@Test
public void testPreserveStatus() {
TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(),
taskAttemptContext.getTaskAttemptID().getJobID());
Configuration conf = jobContext.getConfiguration();
String sourceBase;
String targetBase;
FileSystem fs = null;
try {
OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
fs = FileSystem.get(conf);
FsPermission sourcePerm = new FsPermission((short) 511);
FsPermission initialPerm = new FsPermission((short) 448);
sourceBase = TestDistCpUtils.createTestSetup(fs, sourcePerm);
targetBase = TestDistCpUtils.createTestSetup(fs, initialPerm);
DistCpOptions options = new DistCpOptions(Arrays.asList(new Path(sourceBase)),
new Path("/out"));
options.preserve(FileAttribute.PERMISSION);
options.appendToConf(conf);
options.setTargetPathExists(false);
CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
listing.buildListing(listingFile, options);
conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase);
committer.commitJob(jobContext);
if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
Assert.fail("Permission don't match");
}
//Test for idempotent commit
committer.commitJob(jobContext);
if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
Assert.fail("Permission don't match");
}
} catch (IOException e) {
LOG.error("Exception encountered while testing for preserve status", e);
Assert.fail("Preserve status failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp1");
conf.unset(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
}
}
@Test
public void testDeleteMissing() {
TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(),
taskAttemptContext.getTaskAttemptID().getJobID());
Configuration conf = jobContext.getConfiguration();
String sourceBase;
String targetBase;
FileSystem fs = null;
try {
OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
fs = FileSystem.get(conf);
sourceBase = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
targetBase = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
String targetBaseAdd = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
fs.rename(new Path(targetBaseAdd), new Path(targetBase));
DistCpOptions options = new DistCpOptions(Arrays.asList(new Path(sourceBase)),
new Path("/out"));
options.setSyncFolder(true);
options.setDeleteMissing(true);
options.appendToConf(conf);
CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
listing.buildListing(listingFile, options);
conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase);
conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, targetBase);
committer.commitJob(jobContext);
if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, targetBase, sourceBase)) {
Assert.fail("Source and target folders are not in sync");
}
if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, sourceBase, targetBase)) {
Assert.fail("Source and target folders are not in sync");
}
//Test for idempotent commit
committer.commitJob(jobContext);
if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, targetBase, sourceBase)) {
Assert.fail("Source and target folders are not in sync");
}
if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, sourceBase, targetBase)) {
Assert.fail("Source and target folders are not in sync");
}
} catch (Throwable e) {
LOG.error("Exception encountered while testing for delete missing", e);
Assert.fail("Delete missing failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp1");
conf.set(DistCpConstants.CONF_LABEL_DELETE_MISSING, "false");
}
}
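  // Source and target share the names 4, 5, 7 and 9 but each side also has
  // files of its own; since the commit only deletes and never copies, the
  // target must end up with exactly those four shared names.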
@Test
public void testDeleteMissingFlatInterleavedFiles() {
TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(),
taskAttemptContext.getTaskAttemptID().getJobID());
Configuration conf = jobContext.getConfiguration();
String sourceBase;
String targetBase;
FileSystem fs = null;
try {
OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
fs = FileSystem.get(conf);
sourceBase = "/tmp1/" + String.valueOf(rand.nextLong());
targetBase = "/tmp1/" + String.valueOf(rand.nextLong());
TestDistCpUtils.createFile(fs, sourceBase + "/1");
TestDistCpUtils.createFile(fs, sourceBase + "/3");
TestDistCpUtils.createFile(fs, sourceBase + "/4");
TestDistCpUtils.createFile(fs, sourceBase + "/5");
TestDistCpUtils.createFile(fs, sourceBase + "/7");
TestDistCpUtils.createFile(fs, sourceBase + "/8");
TestDistCpUtils.createFile(fs, sourceBase + "/9");
TestDistCpUtils.createFile(fs, targetBase + "/2");
TestDistCpUtils.createFile(fs, targetBase + "/4");
TestDistCpUtils.createFile(fs, targetBase + "/5");
TestDistCpUtils.createFile(fs, targetBase + "/7");
TestDistCpUtils.createFile(fs, targetBase + "/9");
TestDistCpUtils.createFile(fs, targetBase + "/A");
DistCpOptions options = new DistCpOptions(Arrays.asList(new Path(sourceBase)),
new Path("/out"));
options.setSyncFolder(true);
options.setDeleteMissing(true);
options.appendToConf(conf);
CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
listing.buildListing(listingFile, options);
conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase);
conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, targetBase);
committer.commitJob(jobContext);
if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, targetBase, sourceBase)) {
Assert.fail("Source and target folders are not in sync");
}
      Assert.assertEquals(4, fs.listStatus(new Path(targetBase)).length);
//Test for idempotent commit
committer.commitJob(jobContext);
if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, targetBase, sourceBase)) {
Assert.fail("Source and target folders are not in sync");
}
      Assert.assertEquals(4, fs.listStatus(new Path(targetBase)).length);
} catch (IOException e) {
LOG.error("Exception encountered while testing for delete missing", e);
Assert.fail("Delete missing failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp1");
conf.set(DistCpConstants.CONF_LABEL_DELETE_MISSING, "false");
}
}
@Test
public void testAtomicCommitMissingFinal() {
TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(),
taskAttemptContext.getTaskAttemptID().getJobID());
Configuration conf = jobContext.getConfiguration();
String workPath = "/tmp1/" + String.valueOf(rand.nextLong());
String finalPath = "/tmp1/" + String.valueOf(rand.nextLong());
FileSystem fs = null;
try {
OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
fs = FileSystem.get(conf);
fs.mkdirs(new Path(workPath));
conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, workPath);
conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, finalPath);
conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, true);
Assert.assertTrue(fs.exists(new Path(workPath)));
Assert.assertFalse(fs.exists(new Path(finalPath)));
committer.commitJob(jobContext);
Assert.assertFalse(fs.exists(new Path(workPath)));
Assert.assertTrue(fs.exists(new Path(finalPath)));
//Test for idempotent commit
committer.commitJob(jobContext);
Assert.assertFalse(fs.exists(new Path(workPath)));
Assert.assertTrue(fs.exists(new Path(finalPath)));
} catch (IOException e) {
LOG.error("Exception encountered while testing for preserve status", e);
Assert.fail("Atomic commit failure");
} finally {
TestDistCpUtils.delete(fs, workPath);
TestDistCpUtils.delete(fs, finalPath);
conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, false);
}
}
@Test
public void testAtomicCommitExistingFinal() {
TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(),
taskAttemptContext.getTaskAttemptID().getJobID());
Configuration conf = jobContext.getConfiguration();
String workPath = "/tmp1/" + String.valueOf(rand.nextLong());
String finalPath = "/tmp1/" + String.valueOf(rand.nextLong());
FileSystem fs = null;
try {
OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
fs = FileSystem.get(conf);
fs.mkdirs(new Path(workPath));
fs.mkdirs(new Path(finalPath));
conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, workPath);
conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, finalPath);
conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, true);
Assert.assertTrue(fs.exists(new Path(workPath)));
Assert.assertTrue(fs.exists(new Path(finalPath)));
try {
committer.commitJob(jobContext);
Assert.fail("Should not be able to atomic-commit to pre-existing path.");
} catch(Exception exception) {
Assert.assertTrue(fs.exists(new Path(workPath)));
Assert.assertTrue(fs.exists(new Path(finalPath)));
LOG.info("Atomic-commit Test pass.");
}
} catch (IOException e) {
LOG.error("Exception encountered while testing for atomic commit.", e);
Assert.fail("Atomic commit failure");
} finally {
TestDistCpUtils.delete(fs, workPath);
TestDistCpUtils.delete(fs, finalPath);
conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, false);
}
}
private TaskAttemptContext getTaskAttemptContext(Configuration conf) {
return new TaskAttemptContextImpl(conf,
new TaskAttemptID("200707121733", 1, TaskType.MAP, 1, 1));
}
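  /**
   * Recursively walks targetBase and asserts that every directory below it
   * carries exactly sourcePerm; plain files are not checked.
   */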
private boolean checkDirectoryPermissions(FileSystem fs, String targetBase,
FsPermission sourcePerm) throws IOException {
Path base = new Path(targetBase);
Stack<Path> stack = new Stack<Path>();
stack.push(base);
while (!stack.isEmpty()) {
Path file = stack.pop();
if (!fs.exists(file)) continue;
FileStatus[] fStatus = fs.listStatus(file);
if (fStatus == null || fStatus.length == 0) continue;
for (FileStatus status : fStatus) {
if (status.isDirectory()) {
stack.push(status.getPath());
Assert.assertEquals(status.getPermission(), sourcePerm);
}
}
}
return true;
}
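  /** InputFormat that produces no splits, just enough to build the dummy job used by these tests. */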
private static class NullInputFormat extends InputFormat {
@Override
public List getSplits(JobContext context)
throws IOException, InterruptedException {
return Collections.EMPTY_LIST;
}
@Override
public RecordReader createRecordReader(InputSplit split,
TaskAttemptContext context)
throws IOException, InterruptedException {
return null;
}
}
}
| 16,428 | 37.747642 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestRetriableFileCopyCommand.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.mapred;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.tools.mapred.CopyMapper.FileAction;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
public class TestRetriableFileCopyCommand {
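  /**
   * The target stream is mocked so that close() throws; copyBytes must surface
   * that same exception to the caller instead of swallowing it.
   */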
@SuppressWarnings("rawtypes")
@Test
public void testFailOnCloseError() throws Exception {
Mapper.Context context = mock(Mapper.Context.class);
doReturn(new Configuration()).when(context).getConfiguration();
Exception expectedEx = new IOException("boom");
OutputStream out = mock(OutputStream.class);
doThrow(expectedEx).when(out).close();
File f = File.createTempFile(this.getClass().getSimpleName(), null);
f.deleteOnExit();
FileStatus stat =
new FileStatus(1L, false, 1, 1024, 0, new Path(f.toURI()));
Exception actualEx = null;
try {
new RetriableFileCopyCommand("testFailOnCloseError", FileAction.OVERWRITE)
.copyBytes(stat, 0, out, 512, context);
} catch (Exception e) {
actualEx = e;
}
assertNotNull("close didn't fail", actualEx);
assertEquals(expectedEx, actualEx);
}
}
| 2,164 | 34.491803 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.mapred;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.hadoop.mapreduce.task.JobContextImpl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.DistCpConstants;
import org.junit.Test;
import org.junit.Assert;
import java.io.IOException;
public class TestCopyOutputFormat {
private static final Log LOG = LogFactory.getLog(TestCopyOutputFormat.class);
@Test
public void testSetCommitDirectory() {
try {
Job job = Job.getInstance(new Configuration());
Assert.assertEquals(null, CopyOutputFormat.getCommitDirectory(job));
job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, "");
Assert.assertEquals(null, CopyOutputFormat.getCommitDirectory(job));
Path directory = new Path("/tmp/test");
CopyOutputFormat.setCommitDirectory(job, directory);
Assert.assertEquals(directory, CopyOutputFormat.getCommitDirectory(job));
Assert.assertEquals(directory.toString(), job.getConfiguration().
get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
} catch (IOException e) {
LOG.error("Exception encountered while running test", e);
Assert.fail("Failed while testing for set Commit Directory");
}
}
@Test
public void testSetWorkingDirectory() {
try {
Job job = Job.getInstance(new Configuration());
Assert.assertEquals(null, CopyOutputFormat.getWorkingDirectory(job));
job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, "");
Assert.assertEquals(null, CopyOutputFormat.getWorkingDirectory(job));
Path directory = new Path("/tmp/test");
CopyOutputFormat.setWorkingDirectory(job, directory);
Assert.assertEquals(directory, CopyOutputFormat.getWorkingDirectory(job));
Assert.assertEquals(directory.toString(), job.getConfiguration().
get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));
} catch (IOException e) {
LOG.error("Exception encountered while running test", e);
Assert.fail("Failed while testing for set Working Directory");
}
}
@Test
public void testGetOutputCommitter() {
try {
TaskAttemptContext context = new TaskAttemptContextImpl(new Configuration(),
new TaskAttemptID("200707121733", 1, TaskType.MAP, 1, 1));
context.getConfiguration().set("mapred.output.dir", "/out");
Assert.assertTrue(new CopyOutputFormat().getOutputCommitter(context) instanceof CopyCommitter);
} catch (IOException e) {
LOG.error("Exception encountered ", e);
Assert.fail("Unable to get output committer");
}
}
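  // checkOutputSpecs must reject the job until both the working directory and
  // the commit directory are configured; the sub-cases below cover each
  // combination of missing and present paths.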
@Test
public void testCheckOutputSpecs() {
try {
OutputFormat outputFormat = new CopyOutputFormat();
Job job = Job.getInstance(new Configuration());
JobID jobID = new JobID("200707121733", 1);
try {
JobContext context = new JobContextImpl(job.getConfiguration(), jobID);
outputFormat.checkOutputSpecs(context);
Assert.fail("No checking for invalid work/commit path");
} catch (IllegalStateException ignore) { }
CopyOutputFormat.setWorkingDirectory(job, new Path("/tmp/work"));
try {
JobContext context = new JobContextImpl(job.getConfiguration(), jobID);
outputFormat.checkOutputSpecs(context);
Assert.fail("No checking for invalid commit path");
} catch (IllegalStateException ignore) { }
job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, "");
CopyOutputFormat.setCommitDirectory(job, new Path("/tmp/commit"));
try {
JobContext context = new JobContextImpl(job.getConfiguration(), jobID);
outputFormat.checkOutputSpecs(context);
Assert.fail("No checking for invalid work path");
} catch (IllegalStateException ignore) { }
CopyOutputFormat.setWorkingDirectory(job, new Path("/tmp/work"));
CopyOutputFormat.setCommitDirectory(job, new Path("/tmp/commit"));
try {
JobContext context = new JobContextImpl(job.getConfiguration(), jobID);
outputFormat.checkOutputSpecs(context);
} catch (IllegalStateException ignore) {
Assert.fail("Output spec check failed.");
}
} catch (IOException e) {
LOG.error("Exception encountered while testing checkoutput specs", e);
Assert.fail("Checkoutput Spec failure");
} catch (InterruptedException e) {
LOG.error("Exception encountered while testing checkoutput specs", e);
Assert.fail("Checkoutput Spec failure");
}
}
}
| 5,539 | 39.735294 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestUniformSizeInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.mapred;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.task.JobContextImpl;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.tools.CopyListing;
import org.apache.hadoop.tools.CopyListingFileStatus;
import org.apache.hadoop.tools.DistCpOptions;
import org.apache.hadoop.tools.StubContext;
import org.apache.hadoop.security.Credentials;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
public class TestUniformSizeInputFormat {
private static MiniDFSCluster cluster;
private static final int N_FILES = 20;
private static final int SIZEOF_EACH_FILE=1024;
private static final Random random = new Random();
private static int totalFileSize = 0;
private static final Credentials CREDENTIALS = new Credentials();
@BeforeClass
public static void setup() throws Exception {
cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1)
.format(true).build();
totalFileSize = 0;
for (int i=0; i<N_FILES; ++i)
totalFileSize += createFile("/tmp/source/" + String.valueOf(i), SIZEOF_EACH_FILE);
}
private static DistCpOptions getOptions(int nMaps) throws Exception {
Path sourcePath = new Path(cluster.getFileSystem().getUri().toString()
+ "/tmp/source");
Path targetPath = new Path(cluster.getFileSystem().getUri().toString()
+ "/tmp/target");
List<Path> sourceList = new ArrayList<Path>();
sourceList.add(sourcePath);
final DistCpOptions distCpOptions = new DistCpOptions(sourceList, targetPath);
distCpOptions.setMaxMaps(nMaps);
return distCpOptions;
}
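  /**
   * Writes a file of a random size between fileSize and twice fileSize bytes
   * and returns the number of bytes actually written, so the test can track
   * the true total across all generated files.
   */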
private static int createFile(String path, int fileSize) throws Exception {
FileSystem fileSystem = null;
DataOutputStream outputStream = null;
try {
fileSystem = cluster.getFileSystem();
outputStream = fileSystem.create(new Path(path), true, 0);
int size = (int) Math.ceil(fileSize + (1 - random.nextFloat()) * fileSize);
outputStream.write(new byte[size]);
return size;
}
finally {
IOUtils.cleanup(null, fileSystem, outputStream);
}
}
@AfterClass
public static void tearDown() {
cluster.shutdown();
}
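  /**
   * Builds a copy listing for the generated files, requests nMaps splits from
   * UniformSizeInputFormat and verifies three things: the splits tile the
   * listing file with no gaps, consecutive splits differ in size by less than
   * 10% of the ideal per-map share (the last split excepted), and the split
   * sizes add up to the total number of bytes created.
   */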
public void testGetSplits(int nMaps) throws Exception {
DistCpOptions options = getOptions(nMaps);
Configuration configuration = new Configuration();
configuration.set("mapred.map.tasks",
String.valueOf(options.getMaxMaps()));
Path listFile = new Path(cluster.getFileSystem().getUri().toString()
+ "/tmp/testGetSplits_1/fileList.seq");
CopyListing.getCopyListing(configuration, CREDENTIALS, options).
buildListing(listFile, options);
JobContext jobContext = new JobContextImpl(configuration, new JobID());
UniformSizeInputFormat uniformSizeInputFormat = new UniformSizeInputFormat();
List<InputSplit> splits
= uniformSizeInputFormat.getSplits(jobContext);
int sizePerMap = totalFileSize/nMaps;
checkSplits(listFile, splits);
int doubleCheckedTotalSize = 0;
int previousSplitSize = -1;
for (int i=0; i<splits.size(); ++i) {
InputSplit split = splits.get(i);
int currentSplitSize = 0;
RecordReader<Text, CopyListingFileStatus> recordReader =
uniformSizeInputFormat.createRecordReader(split, null);
StubContext stubContext = new StubContext(jobContext.getConfiguration(),
recordReader, 0);
final TaskAttemptContext taskAttemptContext
= stubContext.getContext();
recordReader.initialize(split, taskAttemptContext);
while (recordReader.nextKeyValue()) {
Path sourcePath = recordReader.getCurrentValue().getPath();
FileSystem fs = sourcePath.getFileSystem(configuration);
FileStatus fileStatus [] = fs.listStatus(sourcePath);
if (fileStatus.length > 1) {
continue;
}
currentSplitSize += fileStatus[0].getLen();
}
Assert.assertTrue(
previousSplitSize == -1
|| Math.abs(currentSplitSize - previousSplitSize) < 0.1*sizePerMap
|| i == splits.size()-1);
doubleCheckedTotalSize += currentSplitSize;
}
Assert.assertEquals(totalFileSize, doubleCheckedTotalSize);
}
private void checkSplits(Path listFile, List<InputSplit> splits) throws IOException {
long lastEnd = 0;
//Verify if each split's start is matching with the previous end and
//we are not missing anything
for (InputSplit split : splits) {
FileSplit fileSplit = (FileSplit) split;
long start = fileSplit.getStart();
Assert.assertEquals(lastEnd, start);
lastEnd = start + fileSplit.getLength();
}
//Verify there is nothing more to read from the input file
SequenceFile.Reader reader
= new SequenceFile.Reader(cluster.getFileSystem().getConf(),
SequenceFile.Reader.file(listFile));
try {
reader.seek(lastEnd);
CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
Text srcRelPath = new Text();
Assert.assertFalse(reader.next(srcRelPath, srcFileStatus));
} finally {
IOUtils.closeStream(reader);
}
}
@Test
public void testGetSplits() throws Exception {
testGetSplits(9);
for (int i=1; i<N_FILES; ++i)
testGetSplits(i);
}
}
| 6,809 | 35.417112 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.mapred;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.security.PrivilegedAction;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.CopyListingFileStatus;
import org.apache.hadoop.tools.DistCpConstants;
import org.apache.hadoop.tools.DistCpOptionSwitch;
import org.apache.hadoop.tools.DistCpOptions;
import org.apache.hadoop.tools.StubContext;
import org.apache.hadoop.tools.util.DistCpUtils;
import org.apache.hadoop.util.DataChecksum;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestCopyMapper {
private static final Log LOG = LogFactory.getLog(TestCopyMapper.class);
private static List<Path> pathList = new ArrayList<Path>();
private static int nFiles = 0;
private static final int DEFAULT_FILE_SIZE = 1024;
private static final long NON_DEFAULT_BLOCK_SIZE = 4096;
private static MiniDFSCluster cluster;
private static final String SOURCE_PATH = "/tmp/source";
private static final String TARGET_PATH = "/tmp/target";
private static Configuration configuration;
@BeforeClass
public static void setup() throws Exception {
configuration = getConfigurationForCluster();
cluster = new MiniDFSCluster.Builder(configuration)
.numDataNodes(1)
.format(true)
.build();
}
private static Configuration getConfigurationForCluster() throws IOException {
Configuration configuration = new Configuration();
System.setProperty("test.build.data", "target/tmp/build/TEST_COPY_MAPPER/data");
configuration.set("hadoop.log.dir", "target/tmp");
configuration.set("dfs.namenode.fs-limits.min-block-size", "0");
LOG.debug("fs.default.name == " + configuration.get("fs.default.name"));
LOG.debug("dfs.http.address == " + configuration.get("dfs.http.address"));
return configuration;
}
private static Configuration getConfiguration() throws IOException {
Configuration configuration = getConfigurationForCluster();
final FileSystem fs = cluster.getFileSystem();
Path workPath = new Path(TARGET_PATH)
.makeQualified(fs.getUri(), fs.getWorkingDirectory());
configuration.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,
workPath.toString());
configuration.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,
workPath.toString());
configuration.setBoolean(DistCpOptionSwitch.OVERWRITE.getConfigLabel(),
false);
configuration.setBoolean(DistCpOptionSwitch.SKIP_CRC.getConfigLabel(),
false);
configuration.setBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(),
true);
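    // Note: "br" below is the packed attribute string handed to PRESERVE_STATUS; going
    // by the attribute letters documented on DistCpOptionSwitch.PRESERVE_STATUS, it
    // presumably selects block-size and replication preservation.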
configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
"br");
return configuration;
}
private static void createSourceData() throws Exception {
mkdirs(SOURCE_PATH + "/1");
mkdirs(SOURCE_PATH + "/2");
mkdirs(SOURCE_PATH + "/2/3/4");
mkdirs(SOURCE_PATH + "/2/3");
mkdirs(SOURCE_PATH + "/5");
touchFile(SOURCE_PATH + "/5/6");
mkdirs(SOURCE_PATH + "/7");
mkdirs(SOURCE_PATH + "/7/8");
touchFile(SOURCE_PATH + "/7/8/9");
}
private static void appendSourceData() throws Exception {
FileSystem fs = cluster.getFileSystem();
for (Path source : pathList) {
if (fs.getFileStatus(source).isFile()) {
// append 2048 bytes per file
appendFile(source, DEFAULT_FILE_SIZE * 2);
}
}
}
private static void createSourceDataWithDifferentBlockSize() throws Exception {
mkdirs(SOURCE_PATH + "/1");
mkdirs(SOURCE_PATH + "/2");
mkdirs(SOURCE_PATH + "/2/3/4");
mkdirs(SOURCE_PATH + "/2/3");
mkdirs(SOURCE_PATH + "/5");
touchFile(SOURCE_PATH + "/5/6", true, null);
mkdirs(SOURCE_PATH + "/7");
mkdirs(SOURCE_PATH + "/7/8");
touchFile(SOURCE_PATH + "/7/8/9");
}
private static void createSourceDataWithDifferentChecksumType()
throws Exception {
mkdirs(SOURCE_PATH + "/1");
mkdirs(SOURCE_PATH + "/2");
mkdirs(SOURCE_PATH + "/2/3/4");
mkdirs(SOURCE_PATH + "/2/3");
mkdirs(SOURCE_PATH + "/5");
touchFile(SOURCE_PATH + "/5/6", new ChecksumOpt(DataChecksum.Type.CRC32,
512));
mkdirs(SOURCE_PATH + "/7");
mkdirs(SOURCE_PATH + "/7/8");
touchFile(SOURCE_PATH + "/7/8/9", new ChecksumOpt(DataChecksum.Type.CRC32C,
512));
}
private static void mkdirs(String path) throws Exception {
FileSystem fileSystem = cluster.getFileSystem();
final Path qualifiedPath = new Path(path).makeQualified(fileSystem.getUri(),
fileSystem.getWorkingDirectory());
pathList.add(qualifiedPath);
fileSystem.mkdirs(qualifiedPath);
}
private static void touchFile(String path) throws Exception {
touchFile(path, false, null);
}
private static void touchFile(String path, ChecksumOpt checksumOpt)
throws Exception {
// create files with specific checksum opt and non-default block size
touchFile(path, true, checksumOpt);
}
private static void touchFile(String path, boolean createMultipleBlocks,
ChecksumOpt checksumOpt) throws Exception {
FileSystem fs;
DataOutputStream outputStream = null;
try {
fs = cluster.getFileSystem();
final Path qualifiedPath = new Path(path).makeQualified(fs.getUri(),
fs.getWorkingDirectory());
final long blockSize = createMultipleBlocks ? NON_DEFAULT_BLOCK_SIZE : fs
.getDefaultBlockSize(qualifiedPath) * 2;
FsPermission permission = FsPermission.getFileDefault().applyUMask(
FsPermission.getUMask(fs.getConf()));
outputStream = fs.create(qualifiedPath, permission,
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), 0,
(short) (fs.getDefaultReplication(qualifiedPath) * 2), blockSize,
null, checksumOpt);
byte[] bytes = new byte[DEFAULT_FILE_SIZE];
outputStream.write(bytes);
long fileSize = DEFAULT_FILE_SIZE;
if (createMultipleBlocks) {
while (fileSize < 2*blockSize) {
outputStream.write(bytes);
outputStream.flush();
fileSize += DEFAULT_FILE_SIZE;
}
}
pathList.add(qualifiedPath);
++nFiles;
FileStatus fileStatus = fs.getFileStatus(qualifiedPath);
System.out.println(fileStatus.getBlockSize());
System.out.println(fileStatus.getReplication());
}
finally {
IOUtils.cleanup(null, outputStream);
}
}
/**
* Append specified length of bytes to a given file
*/
private static void appendFile(Path p, int length) throws IOException {
byte[] toAppend = new byte[length];
Random random = new Random();
random.nextBytes(toAppend);
FSDataOutputStream out = cluster.getFileSystem().append(p);
try {
out.write(toAppend);
} finally {
IOUtils.closeStream(out);
}
}
@Test
public void testCopyWithDifferentChecksumType() throws Exception {
testCopy(true);
}
@Test(timeout=40000)
public void testRun() throws Exception {
testCopy(false);
}
@Test
public void testCopyWithAppend() throws Exception {
final FileSystem fs = cluster.getFileSystem();
// do the first distcp
testCopy(false);
// start appending data to source
appendSourceData();
// do the distcp again with -update and -append option
CopyMapper copyMapper = new CopyMapper();
StubContext stubContext = new StubContext(getConfiguration(), null, 0);
Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
stubContext.getContext();
// Enable append
context.getConfiguration().setBoolean(
DistCpOptionSwitch.APPEND.getConfigLabel(), true);
copyMapper.setup(context);
for (Path path: pathList) {
copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
new CopyListingFileStatus(cluster.getFileSystem().getFileStatus(
path)), context);
}
verifyCopy(fs, false);
// verify that we only copied new appended data
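    // (Each file was created with DEFAULT_FILE_SIZE bytes and then had
    // DEFAULT_FILE_SIZE * 2 bytes appended, so an -update/-append pass should
    // transfer only the appended bytes per file, as asserted below.)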
Assert.assertEquals(nFiles * DEFAULT_FILE_SIZE * 2, stubContext
.getReporter().getCounter(CopyMapper.Counter.BYTESCOPIED)
.getValue());
Assert.assertEquals(pathList.size(), stubContext.getReporter().
getCounter(CopyMapper.Counter.COPY).getValue());
}
private void testCopy(boolean preserveChecksum) throws Exception {
deleteState();
if (preserveChecksum) {
createSourceDataWithDifferentChecksumType();
} else {
createSourceData();
}
FileSystem fs = cluster.getFileSystem();
CopyMapper copyMapper = new CopyMapper();
StubContext stubContext = new StubContext(getConfiguration(), null, 0);
Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
= stubContext.getContext();
Configuration configuration = context.getConfiguration();
EnumSet<DistCpOptions.FileAttribute> fileAttributes
= EnumSet.of(DistCpOptions.FileAttribute.REPLICATION);
if (preserveChecksum) {
fileAttributes.add(DistCpOptions.FileAttribute.CHECKSUMTYPE);
}
configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
DistCpUtils.packAttributes(fileAttributes));
copyMapper.setup(context);
for (Path path: pathList) {
copyMapper.map(
new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
new CopyListingFileStatus(fs.getFileStatus(path)), context);
}
// Check that the maps worked.
verifyCopy(fs, preserveChecksum);
Assert.assertEquals(pathList.size(), stubContext.getReporter()
.getCounter(CopyMapper.Counter.COPY).getValue());
if (!preserveChecksum) {
Assert.assertEquals(nFiles * DEFAULT_FILE_SIZE, stubContext
.getReporter().getCounter(CopyMapper.Counter.BYTESCOPIED)
.getValue());
} else {
Assert.assertEquals(nFiles * NON_DEFAULT_BLOCK_SIZE * 2, stubContext
.getReporter().getCounter(CopyMapper.Counter.BYTESCOPIED)
.getValue());
}
testCopyingExistingFiles(fs, copyMapper, context);
for (Text value : stubContext.getWriter().values()) {
Assert.assertTrue(value.toString() + " is not skipped", value
.toString().startsWith("SKIP:"));
}
}
private void verifyCopy(FileSystem fs, boolean preserveChecksum)
throws Exception {
for (Path path : pathList) {
final Path targetPath = new Path(path.toString().replaceAll(SOURCE_PATH,
TARGET_PATH));
Assert.assertTrue(fs.exists(targetPath));
Assert.assertTrue(fs.isFile(targetPath) == fs.isFile(path));
FileStatus sourceStatus = fs.getFileStatus(path);
FileStatus targetStatus = fs.getFileStatus(targetPath);
Assert.assertEquals(sourceStatus.getReplication(),
targetStatus.getReplication());
if (preserveChecksum) {
Assert.assertEquals(sourceStatus.getBlockSize(),
targetStatus.getBlockSize());
}
Assert.assertTrue(!fs.isFile(targetPath)
|| fs.getFileChecksum(targetPath).equals(fs.getFileChecksum(path)));
}
}
private void testCopyingExistingFiles(FileSystem fs, CopyMapper copyMapper,
Mapper<Text, CopyListingFileStatus, Text, Text>.Context context) {
try {
for (Path path : pathList) {
copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
new CopyListingFileStatus(fs.getFileStatus(path)), context);
}
Assert.assertEquals(nFiles,
context.getCounter(CopyMapper.Counter.SKIP).getValue());
}
catch (Exception exception) {
Assert.assertTrue("Caught unexpected exception:" + exception.getMessage(),
false);
}
}
@Test(timeout=40000)
public void testMakeDirFailure() {
try {
deleteState();
createSourceData();
FileSystem fs = cluster.getFileSystem();
CopyMapper copyMapper = new CopyMapper();
StubContext stubContext = new StubContext(getConfiguration(), null, 0);
Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
= stubContext.getContext();
Configuration configuration = context.getConfiguration();
String workPath = new Path("hftp://localhost:1234/*/*/*/?/")
.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString();
configuration.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,
workPath);
copyMapper.setup(context);
copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), pathList.get(0))),
new CopyListingFileStatus(fs.getFileStatus(pathList.get(0))), context);
Assert.assertTrue("There should have been an exception.", false);
}
catch (Exception ignore) {
}
}
@Test(timeout=40000)
public void testIgnoreFailures() {
doTestIgnoreFailures(true);
doTestIgnoreFailures(false);
}
@Test(timeout=40000)
public void testDirToFile() {
try {
deleteState();
createSourceData();
FileSystem fs = cluster.getFileSystem();
CopyMapper copyMapper = new CopyMapper();
StubContext stubContext = new StubContext(getConfiguration(), null, 0);
Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
= stubContext.getContext();
mkdirs(SOURCE_PATH + "/src/file");
touchFile(TARGET_PATH + "/src/file");
try {
copyMapper.setup(context);
copyMapper.map(new Text("/src/file"),
new CopyListingFileStatus(fs.getFileStatus(
new Path(SOURCE_PATH + "/src/file"))),
context);
} catch (IOException e) {
Assert.assertTrue(e.getMessage().startsWith("Can't replace"));
}
} catch (Exception e) {
LOG.error("Exception encountered ", e);
Assert.fail("Test failed: " + e.getMessage());
}
}
@Test(timeout=40000)
public void testPreserve() {
try {
deleteState();
createSourceData();
UserGroupInformation tmpUser = UserGroupInformation.createRemoteUser("guest");
final CopyMapper copyMapper = new CopyMapper();
final Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
tmpUser.doAs(
new PrivilegedAction<Mapper<Text, CopyListingFileStatus, Text, Text>.Context>() {
@Override
public Mapper<Text, CopyListingFileStatus, Text, Text>.Context run() {
try {
StubContext stubContext = new StubContext(getConfiguration(), null, 0);
return stubContext.getContext();
} catch (Exception e) {
LOG.error("Exception encountered ", e);
throw new RuntimeException(e);
}
}
});
EnumSet<DistCpOptions.FileAttribute> preserveStatus =
EnumSet.allOf(DistCpOptions.FileAttribute.class);
preserveStatus.remove(DistCpOptions.FileAttribute.ACL);
preserveStatus.remove(DistCpOptions.FileAttribute.XATTR);
context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,
DistCpUtils.packAttributes(preserveStatus));
touchFile(SOURCE_PATH + "/src/file");
mkdirs(TARGET_PATH);
cluster.getFileSystem().setPermission(new Path(TARGET_PATH), new FsPermission((short)511));
final FileSystem tmpFS = tmpUser.doAs(new PrivilegedAction<FileSystem>() {
@Override
public FileSystem run() {
try {
return FileSystem.get(configuration);
} catch (IOException e) {
LOG.error("Exception encountered ", e);
Assert.fail("Test failed: " + e.getMessage());
throw new RuntimeException("Test ought to fail here");
}
}
});
tmpUser.doAs(new PrivilegedAction<Integer>() {
@Override
public Integer run() {
try {
copyMapper.setup(context);
copyMapper.map(new Text("/src/file"),
new CopyListingFileStatus(tmpFS.getFileStatus(
new Path(SOURCE_PATH + "/src/file"))),
context);
Assert.fail("Expected copy to fail");
} catch (AccessControlException e) {
Assert.assertTrue("Got exception: " + e.getMessage(), true);
} catch (Exception e) {
throw new RuntimeException(e);
}
return null;
}
});
} catch (Exception e) {
LOG.error("Exception encountered ", e);
Assert.fail("Test failed: " + e.getMessage());
}
}
@Test(timeout=40000)
public void testCopyReadableFiles() {
try {
deleteState();
createSourceData();
UserGroupInformation tmpUser = UserGroupInformation.createRemoteUser("guest");
final CopyMapper copyMapper = new CopyMapper();
final Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
tmpUser.doAs(
new PrivilegedAction<Mapper<Text, CopyListingFileStatus, Text, Text>.Context>() {
@Override
public Mapper<Text, CopyListingFileStatus, Text, Text>.Context run() {
try {
StubContext stubContext = new StubContext(getConfiguration(), null, 0);
return stubContext.getContext();
} catch (Exception e) {
LOG.error("Exception encountered ", e);
throw new RuntimeException(e);
}
}
});
touchFile(SOURCE_PATH + "/src/file");
mkdirs(TARGET_PATH);
cluster.getFileSystem().setPermission(new Path(SOURCE_PATH + "/src/file"),
new FsPermission(FsAction.READ, FsAction.READ, FsAction.READ));
cluster.getFileSystem().setPermission(new Path(TARGET_PATH), new FsPermission((short)511));
final FileSystem tmpFS = tmpUser.doAs(new PrivilegedAction<FileSystem>() {
@Override
public FileSystem run() {
try {
return FileSystem.get(configuration);
} catch (IOException e) {
LOG.error("Exception encountered ", e);
Assert.fail("Test failed: " + e.getMessage());
throw new RuntimeException("Test ought to fail here");
}
}
});
tmpUser.doAs(new PrivilegedAction<Integer>() {
@Override
public Integer run() {
try {
copyMapper.setup(context);
copyMapper.map(new Text("/src/file"),
new CopyListingFileStatus(tmpFS.getFileStatus(
new Path(SOURCE_PATH + "/src/file"))),
context);
} catch (Exception e) {
throw new RuntimeException(e);
}
return null;
}
});
} catch (Exception e) {
LOG.error("Exception encountered ", e);
Assert.fail("Test failed: " + e.getMessage());
}
}
@Test(timeout=40000)
public void testSkipCopyNoPerms() {
try {
deleteState();
createSourceData();
UserGroupInformation tmpUser = UserGroupInformation.createRemoteUser("guest");
final CopyMapper copyMapper = new CopyMapper();
final StubContext stubContext = tmpUser.
doAs(new PrivilegedAction<StubContext>() {
@Override
public StubContext run() {
try {
return new StubContext(getConfiguration(), null, 0);
} catch (Exception e) {
LOG.error("Exception encountered ", e);
throw new RuntimeException(e);
}
}
});
final Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
stubContext.getContext();
EnumSet<DistCpOptions.FileAttribute> preserveStatus =
EnumSet.allOf(DistCpOptions.FileAttribute.class);
preserveStatus.remove(DistCpOptions.FileAttribute.ACL);
preserveStatus.remove(DistCpOptions.FileAttribute.XATTR);
preserveStatus.remove(DistCpOptions.FileAttribute.TIMES);
context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,
DistCpUtils.packAttributes(preserveStatus));
touchFile(SOURCE_PATH + "/src/file");
touchFile(TARGET_PATH + "/src/file");
cluster.getFileSystem().setPermission(new Path(SOURCE_PATH + "/src/file"),
new FsPermission(FsAction.READ, FsAction.READ, FsAction.READ));
cluster.getFileSystem().setPermission(new Path(TARGET_PATH + "/src/file"),
new FsPermission(FsAction.READ, FsAction.READ, FsAction.READ));
final FileSystem tmpFS = tmpUser.doAs(new PrivilegedAction<FileSystem>() {
@Override
public FileSystem run() {
try {
return FileSystem.get(configuration);
} catch (IOException e) {
LOG.error("Exception encountered ", e);
Assert.fail("Test failed: " + e.getMessage());
throw new RuntimeException("Test ought to fail here");
}
}
});
tmpUser.doAs(new PrivilegedAction<Integer>() {
@Override
public Integer run() {
try {
copyMapper.setup(context);
copyMapper.map(new Text("/src/file"),
new CopyListingFileStatus(tmpFS.getFileStatus(
new Path(SOURCE_PATH + "/src/file"))),
context);
Assert.assertEquals(stubContext.getWriter().values().size(), 1);
Assert.assertTrue(stubContext.getWriter().values().get(0).toString().startsWith("SKIP"));
Assert.assertTrue(stubContext.getWriter().values().get(0).toString().
contains(SOURCE_PATH + "/src/file"));
} catch (Exception e) {
throw new RuntimeException(e);
}
return null;
}
});
} catch (Exception e) {
LOG.error("Exception encountered ", e);
Assert.fail("Test failed: " + e.getMessage());
}
}
@Test(timeout=40000)
public void testFailCopyWithAccessControlException() {
try {
deleteState();
createSourceData();
UserGroupInformation tmpUser = UserGroupInformation.createRemoteUser("guest");
final CopyMapper copyMapper = new CopyMapper();
final StubContext stubContext = tmpUser.
doAs(new PrivilegedAction<StubContext>() {
@Override
public StubContext run() {
try {
return new StubContext(getConfiguration(), null, 0);
} catch (Exception e) {
LOG.error("Exception encountered ", e);
throw new RuntimeException(e);
}
}
});
EnumSet<DistCpOptions.FileAttribute> preserveStatus =
EnumSet.allOf(DistCpOptions.FileAttribute.class);
preserveStatus.remove(DistCpOptions.FileAttribute.ACL);
preserveStatus.remove(DistCpOptions.FileAttribute.XATTR);
final Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
= stubContext.getContext();
context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,
DistCpUtils.packAttributes(preserveStatus));
touchFile(SOURCE_PATH + "/src/file");
OutputStream out = cluster.getFileSystem().create(new Path(TARGET_PATH + "/src/file"));
out.write("hello world".getBytes());
out.close();
cluster.getFileSystem().setPermission(new Path(SOURCE_PATH + "/src/file"),
new FsPermission(FsAction.READ, FsAction.READ, FsAction.READ));
cluster.getFileSystem().setPermission(new Path(TARGET_PATH + "/src/file"),
new FsPermission(FsAction.READ, FsAction.READ, FsAction.READ));
final FileSystem tmpFS = tmpUser.doAs(new PrivilegedAction<FileSystem>() {
@Override
public FileSystem run() {
try {
return FileSystem.get(configuration);
} catch (IOException e) {
LOG.error("Exception encountered ", e);
Assert.fail("Test failed: " + e.getMessage());
throw new RuntimeException("Test ought to fail here");
}
}
});
tmpUser.doAs(new PrivilegedAction<Integer>() {
@Override
public Integer run() {
try {
copyMapper.setup(context);
copyMapper.map(new Text("/src/file"),
new CopyListingFileStatus(tmpFS.getFileStatus(
new Path(SOURCE_PATH + "/src/file"))),
context);
Assert.fail("Didn't expect the file to be copied");
} catch (AccessControlException ignore) {
} catch (Exception e) {
// We want to make sure the underlying cause of the exception is
// due to permissions error. The exception we're interested in is
// wrapped twice - once in RetriableCommand and again in CopyMapper
// itself.
if (e.getCause() == null || e.getCause().getCause() == null ||
!(e.getCause().getCause() instanceof AccessControlException)) {
throw new RuntimeException(e);
}
}
return null;
}
});
} catch (Exception e) {
LOG.error("Exception encountered ", e);
Assert.fail("Test failed: " + e.getMessage());
}
}
@Test(timeout=40000)
public void testFileToDir() {
try {
deleteState();
createSourceData();
FileSystem fs = cluster.getFileSystem();
CopyMapper copyMapper = new CopyMapper();
StubContext stubContext = new StubContext(getConfiguration(), null, 0);
Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
= stubContext.getContext();
touchFile(SOURCE_PATH + "/src/file");
mkdirs(TARGET_PATH + "/src/file");
try {
copyMapper.setup(context);
copyMapper.map(new Text("/src/file"),
new CopyListingFileStatus(fs.getFileStatus(
new Path(SOURCE_PATH + "/src/file"))),
context);
} catch (IOException e) {
Assert.assertTrue(e.getMessage().startsWith("Can't replace"));
}
} catch (Exception e) {
LOG.error("Exception encountered ", e);
Assert.fail("Test failed: " + e.getMessage());
}
}
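  /**
   * Helper for testIgnoreFailures: every non-directory source path is deleted just
   * before its map() call, so each copy attempt fails. With ignoreFailures set, the
   * mapper is expected to record "FAIL:" entries and keep going; without it, the
   * exception should propagate and is asserted on in the catch block.
   */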
private void doTestIgnoreFailures(boolean ignoreFailures) {
try {
deleteState();
createSourceData();
FileSystem fs = cluster.getFileSystem();
CopyMapper copyMapper = new CopyMapper();
StubContext stubContext = new StubContext(getConfiguration(), null, 0);
Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
= stubContext.getContext();
Configuration configuration = context.getConfiguration();
configuration.setBoolean(
DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(),ignoreFailures);
configuration.setBoolean(DistCpOptionSwitch.OVERWRITE.getConfigLabel(),
true);
configuration.setBoolean(DistCpOptionSwitch.SKIP_CRC.getConfigLabel(),
true);
copyMapper.setup(context);
for (Path path : pathList) {
final FileStatus fileStatus = fs.getFileStatus(path);
if (!fileStatus.isDirectory()) {
fs.delete(path, true);
copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
new CopyListingFileStatus(fileStatus), context);
}
}
if (ignoreFailures) {
for (Text value : stubContext.getWriter().values()) {
          Assert.assertTrue(value.toString() + " does not indicate a failure", value.toString().startsWith("FAIL:"));
}
}
Assert.assertTrue("There should have been an exception.", ignoreFailures);
}
catch (Exception e) {
Assert.assertTrue("Unexpected exception: " + e.getMessage(),
!ignoreFailures);
e.printStackTrace();
}
}
private static void deleteState() throws IOException {
pathList.clear();
nFiles = 0;
cluster.getFileSystem().delete(new Path(SOURCE_PATH), true);
cluster.getFileSystem().delete(new Path(TARGET_PATH), true);
}
@Test(timeout=40000)
public void testPreserveBlockSizeAndReplication() {
testPreserveBlockSizeAndReplicationImpl(true);
testPreserveBlockSizeAndReplicationImpl(false);
}
@Test(timeout=40000)
public void testCopyFailOnBlockSizeDifference() {
try {
deleteState();
createSourceDataWithDifferentBlockSize();
FileSystem fs = cluster.getFileSystem();
CopyMapper copyMapper = new CopyMapper();
StubContext stubContext = new StubContext(getConfiguration(), null, 0);
Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
= stubContext.getContext();
Configuration configuration = context.getConfiguration();
EnumSet<DistCpOptions.FileAttribute> fileAttributes
= EnumSet.noneOf(DistCpOptions.FileAttribute.class);
configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
DistCpUtils.packAttributes(fileAttributes));
copyMapper.setup(context);
for (Path path : pathList) {
final FileStatus fileStatus = fs.getFileStatus(path);
copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
new CopyListingFileStatus(fileStatus), context);
}
Assert.fail("Copy should have failed because of block-size difference.");
}
catch (Exception exception) {
// Check that the exception suggests the use of -pb/-skipCrc.
Assert.assertTrue("Failure exception should have suggested the use of -pb.", exception.getCause().getCause().getMessage().contains("pb"));
Assert.assertTrue("Failure exception should have suggested the use of -skipCrc.", exception.getCause().getCause().getMessage().contains("skipCrc"));
}
}
private void testPreserveBlockSizeAndReplicationImpl(boolean preserve){
try {
deleteState();
createSourceData();
FileSystem fs = cluster.getFileSystem();
CopyMapper copyMapper = new CopyMapper();
StubContext stubContext = new StubContext(getConfiguration(), null, 0);
Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
= stubContext.getContext();
Configuration configuration = context.getConfiguration();
EnumSet<DistCpOptions.FileAttribute> fileAttributes
= EnumSet.noneOf(DistCpOptions.FileAttribute.class);
if (preserve) {
fileAttributes.add(DistCpOptions.FileAttribute.BLOCKSIZE);
fileAttributes.add(DistCpOptions.FileAttribute.REPLICATION);
}
configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
DistCpUtils.packAttributes(fileAttributes));
copyMapper.setup(context);
for (Path path : pathList) {
final FileStatus fileStatus = fs.getFileStatus(path);
copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
new CopyListingFileStatus(fileStatus), context);
}
// Check that the block-size/replication aren't preserved.
for (Path path : pathList) {
final Path targetPath = new Path(path.toString()
.replaceAll(SOURCE_PATH, TARGET_PATH));
final FileStatus source = fs.getFileStatus(path);
final FileStatus target = fs.getFileStatus(targetPath);
if (!source.isDirectory() ) {
Assert.assertTrue(preserve ||
source.getBlockSize() != target.getBlockSize());
Assert.assertTrue(preserve ||
source.getReplication() != target.getReplication());
Assert.assertTrue(!preserve ||
source.getBlockSize() == target.getBlockSize());
Assert.assertTrue(!preserve ||
source.getReplication() == target.getReplication());
}
}
}
catch (Exception e) {
Assert.assertTrue("Unexpected exception: " + e.getMessage(), false);
e.printStackTrace();
}
}
private static void changeUserGroup(String user, String group)
throws IOException {
FileSystem fs = cluster.getFileSystem();
FsPermission changedPermission = new FsPermission(
FsAction.ALL, FsAction.ALL, FsAction.ALL
);
for (Path path : pathList)
if (fs.isFile(path)) {
fs.setOwner(path, user, group);
fs.setPermission(path, changedPermission);
}
}
/**
* If a single file is being copied to a location where the file (of the same
* name) already exists, then the file shouldn't be skipped.
*/
@Test(timeout=40000)
public void testSingleFileCopy() {
try {
deleteState();
touchFile(SOURCE_PATH + "/1");
Path sourceFilePath = pathList.get(0);
Path targetFilePath = new Path(sourceFilePath.toString().replaceAll(
SOURCE_PATH, TARGET_PATH));
touchFile(targetFilePath.toString());
FileSystem fs = cluster.getFileSystem();
CopyMapper copyMapper = new CopyMapper();
StubContext stubContext = new StubContext(getConfiguration(), null, 0);
Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
= stubContext.getContext();
context.getConfiguration().set(
DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,
targetFilePath.getParent().toString()); // Parent directory.
copyMapper.setup(context);
final CopyListingFileStatus sourceFileStatus = new CopyListingFileStatus(
fs.getFileStatus(sourceFilePath));
long before = fs.getFileStatus(targetFilePath).getModificationTime();
copyMapper.map(new Text(DistCpUtils.getRelativePath(
new Path(SOURCE_PATH), sourceFilePath)), sourceFileStatus, context);
long after = fs.getFileStatus(targetFilePath).getModificationTime();
Assert.assertTrue("File should have been skipped", before == after);
context.getConfiguration().set(
DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,
targetFilePath.toString()); // Specify the file path.
copyMapper.setup(context);
before = fs.getFileStatus(targetFilePath).getModificationTime();
try { Thread.sleep(2); } catch (Throwable ignore) {}
copyMapper.map(new Text(DistCpUtils.getRelativePath(
new Path(SOURCE_PATH), sourceFilePath)), sourceFileStatus, context);
after = fs.getFileStatus(targetFilePath).getModificationTime();
Assert.assertTrue("File should have been overwritten.", before < after);
} catch (Exception exception) {
Assert.fail("Unexpected exception: " + exception.getMessage());
exception.printStackTrace();
}
}
@Test(timeout=40000)
public void testPreserveUserGroup() {
testPreserveUserGroupImpl(true);
testPreserveUserGroupImpl(false);
}
private void testPreserveUserGroupImpl(boolean preserve){
try {
deleteState();
createSourceData();
changeUserGroup("Michael", "Corleone");
FileSystem fs = cluster.getFileSystem();
CopyMapper copyMapper = new CopyMapper();
StubContext stubContext = new StubContext(getConfiguration(), null, 0);
Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
= stubContext.getContext();
Configuration configuration = context.getConfiguration();
EnumSet<DistCpOptions.FileAttribute> fileAttributes
= EnumSet.noneOf(DistCpOptions.FileAttribute.class);
if (preserve) {
fileAttributes.add(DistCpOptions.FileAttribute.USER);
fileAttributes.add(DistCpOptions.FileAttribute.GROUP);
fileAttributes.add(DistCpOptions.FileAttribute.PERMISSION);
}
configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
DistCpUtils.packAttributes(fileAttributes));
copyMapper.setup(context);
for (Path path : pathList) {
final FileStatus fileStatus = fs.getFileStatus(path);
copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
new CopyListingFileStatus(fileStatus), context);
}
// Check that the user/group attributes are preserved
// (only) as necessary.
for (Path path : pathList) {
final Path targetPath = new Path(path.toString()
.replaceAll(SOURCE_PATH, TARGET_PATH));
final FileStatus source = fs.getFileStatus(path);
final FileStatus target = fs.getFileStatus(targetPath);
if (!source.isDirectory()) {
Assert.assertTrue(!preserve || source.getOwner().equals(target.getOwner()));
Assert.assertTrue(!preserve || source.getGroup().equals(target.getGroup()));
Assert.assertTrue(!preserve || source.getPermission().equals(target.getPermission()));
Assert.assertTrue( preserve || !source.getOwner().equals(target.getOwner()));
Assert.assertTrue( preserve || !source.getGroup().equals(target.getGroup()));
Assert.assertTrue( preserve || !source.getPermission().equals(target.getPermission()));
Assert.assertTrue(source.isDirectory() ||
source.getReplication() != target.getReplication());
}
}
}
catch (Exception e) {
Assert.assertTrue("Unexpected exception: " + e.getMessage(), false);
e.printStackTrace();
}
}
}
| 38,974 | 36.729913 | 154 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/lib/TestDynamicInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.mapred.lib;
import org.apache.hadoop.tools.DistCpConstants;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.task.JobContextImpl;
import org.apache.hadoop.tools.CopyListing;
import org.apache.hadoop.tools.CopyListingFileStatus;
import org.apache.hadoop.tools.DistCpOptions;
import org.apache.hadoop.tools.StubContext;
import org.apache.hadoop.security.Credentials;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.DataOutputStream;
import java.util.ArrayList;
import java.util.List;
public class TestDynamicInputFormat {
private static final Log LOG = LogFactory.getLog(TestDynamicInputFormat.class);
private static MiniDFSCluster cluster;
private static final int N_FILES = 1000;
private static final int NUM_SPLITS = 7;
private static final Credentials CREDENTIALS = new Credentials();
private static List<String> expectedFilePaths = new ArrayList<String>(N_FILES);
@BeforeClass
public static void setup() throws Exception {
cluster = new MiniDFSCluster.Builder(getConfigurationForCluster())
.numDataNodes(1).format(true).build();
for (int i=0; i<N_FILES; ++i)
createFile("/tmp/source/" + String.valueOf(i));
FileSystem fileSystem = cluster.getFileSystem();
expectedFilePaths.add(fileSystem.listStatus(
new Path("/tmp/source/0"))[0].getPath().getParent().toString());
}
private static Configuration getConfigurationForCluster() {
Configuration configuration = new Configuration();
System.setProperty("test.build.data",
"target/tmp/build/TEST_DYNAMIC_INPUT_FORMAT/data");
configuration.set("hadoop.log.dir", "target/tmp");
LOG.debug("fs.default.name == " + configuration.get("fs.default.name"));
LOG.debug("dfs.http.address == " + configuration.get("dfs.http.address"));
return configuration;
}
private static DistCpOptions getOptions() throws Exception {
Path sourcePath = new Path(cluster.getFileSystem().getUri().toString()
+ "/tmp/source");
Path targetPath = new Path(cluster.getFileSystem().getUri().toString()
+ "/tmp/target");
List<Path> sourceList = new ArrayList<Path>();
sourceList.add(sourcePath);
DistCpOptions options = new DistCpOptions(sourceList, targetPath);
options.setMaxMaps(NUM_SPLITS);
return options;
}
private static void createFile(String path) throws Exception {
FileSystem fileSystem = null;
DataOutputStream outputStream = null;
try {
fileSystem = cluster.getFileSystem();
outputStream = fileSystem.create(new Path(path), true, 0);
expectedFilePaths.add(fileSystem.listStatus(
new Path(path))[0].getPath().toString());
}
finally {
IOUtils.cleanup(null, fileSystem, outputStream);
}
}
@AfterClass
public static void tearDown() {
cluster.shutdown();
}
@Test
public void testGetSplits() throws Exception {
DistCpOptions options = getOptions();
Configuration configuration = new Configuration();
configuration.set("mapred.map.tasks",
String.valueOf(options.getMaxMaps()));
CopyListing.getCopyListing(configuration, CREDENTIALS, options).buildListing(
new Path(cluster.getFileSystem().getUri().toString()
+"/tmp/testDynInputFormat/fileList.seq"), options);
JobContext jobContext = new JobContextImpl(configuration, new JobID());
DynamicInputFormat<Text, CopyListingFileStatus> inputFormat =
new DynamicInputFormat<Text, CopyListingFileStatus>();
List<InputSplit> splits = inputFormat.getSplits(jobContext);
int nFiles = 0;
int taskId = 0;
for (InputSplit split : splits) {
RecordReader<Text, CopyListingFileStatus> recordReader =
inputFormat.createRecordReader(split, null);
StubContext stubContext = new StubContext(jobContext.getConfiguration(),
recordReader, taskId);
final TaskAttemptContext taskAttemptContext
= stubContext.getContext();
recordReader.initialize(splits.get(0), taskAttemptContext);
float previousProgressValue = 0f;
while (recordReader.nextKeyValue()) {
CopyListingFileStatus fileStatus = recordReader.getCurrentValue();
String source = fileStatus.getPath().toString();
System.out.println(source);
Assert.assertTrue(expectedFilePaths.contains(source));
final float progress = recordReader.getProgress();
Assert.assertTrue(progress >= previousProgressValue);
Assert.assertTrue(progress >= 0.0f);
Assert.assertTrue(progress <= 1.0f);
previousProgressValue = progress;
++nFiles;
}
Assert.assertTrue(recordReader.getProgress() == 1.0f);
++taskId;
}
Assert.assertEquals(expectedFilePaths.size(), nFiles);
}
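  // The assertions above verify the dynamic-input contract exercised here: every record
  // read comes from the expected path set, the total record count matches that set, and
  // each record reader reports monotonically non-decreasing progress ending at 1.0.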
@Test
public void testGetSplitRatio() throws Exception {
Assert.assertEquals(1, DynamicInputFormat.getSplitRatio(1, 1000000000));
Assert.assertEquals(2, DynamicInputFormat.getSplitRatio(11000000, 10));
Assert.assertEquals(4, DynamicInputFormat.getSplitRatio(30, 700));
Assert.assertEquals(2, DynamicInputFormat.getSplitRatio(30, 200));
// Tests with negative value configuration
Configuration conf = new Configuration();
conf.setInt(DistCpConstants.CONF_LABEL_MAX_CHUNKS_TOLERABLE, -1);
conf.setInt(DistCpConstants.CONF_LABEL_MAX_CHUNKS_IDEAL, -1);
conf.setInt(DistCpConstants.CONF_LABEL_MIN_RECORDS_PER_CHUNK, -1);
conf.setInt(DistCpConstants.CONF_LABEL_SPLIT_RATIO, -1);
Assert.assertEquals(1,
DynamicInputFormat.getSplitRatio(1, 1000000000, conf));
Assert.assertEquals(2,
DynamicInputFormat.getSplitRatio(11000000, 10, conf));
Assert.assertEquals(4, DynamicInputFormat.getSplitRatio(30, 700, conf));
Assert.assertEquals(2, DynamicInputFormat.getSplitRatio(30, 200, conf));
// Tests with valid configuration
conf.setInt(DistCpConstants.CONF_LABEL_MAX_CHUNKS_TOLERABLE, 100);
conf.setInt(DistCpConstants.CONF_LABEL_MAX_CHUNKS_IDEAL, 30);
conf.setInt(DistCpConstants.CONF_LABEL_MIN_RECORDS_PER_CHUNK, 10);
conf.setInt(DistCpConstants.CONF_LABEL_SPLIT_RATIO, 53);
Assert.assertEquals(53, DynamicInputFormat.getSplitRatio(3, 200, conf));
}
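  // Note on the "negative value" block above: invalid (negative) settings appear to be
  // ignored in favour of the built-in defaults (see the *_DEFAULT constants in
  // DistCpConstants), which is why those assertions match the unconfigured results.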
}
| 7,565 | 39.677419 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/package-info.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* DistCp is a tool for replicating data using MapReduce jobs for concurrent
* copy operations.
*
* @version 2
*/
package org.apache.hadoop.tools;
| 963 | 36.076923 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
|
package org.apache.hadoop.tools;
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Utility class to hold commonly used constants.
*/
public class DistCpConstants {
/* Default number of threads to use for building file listing */
public static final int DEFAULT_LISTSTATUS_THREADS = 1;
/* Default number of maps to use for DistCp */
public static final int DEFAULT_MAPS = 20;
/* Default bandwidth if none specified */
public static final int DEFAULT_BANDWIDTH_MB = 100;
/* Default strategy for copying. Implementation looked up
from distcp-default.xml
*/
public static final String UNIFORMSIZE = "uniformsize";
/**
* Constants mapping to command line switches/input options
*/
public static final String CONF_LABEL_ATOMIC_COPY = "distcp.atomic.copy";
public static final String CONF_LABEL_WORK_PATH = "distcp.work.path";
public static final String CONF_LABEL_LOG_PATH = "distcp.log.path";
public static final String CONF_LABEL_IGNORE_FAILURES = "distcp.ignore.failures";
public static final String CONF_LABEL_PRESERVE_STATUS = "distcp.preserve.status";
public static final String CONF_LABEL_PRESERVE_RAWXATTRS =
"distcp.preserve.rawxattrs";
public static final String CONF_LABEL_SYNC_FOLDERS = "distcp.sync.folders";
public static final String CONF_LABEL_DELETE_MISSING = "distcp.delete.missing.source";
public static final String CONF_LABEL_SSL_CONF = "distcp.keystore.resource";
public static final String CONF_LABEL_LISTSTATUS_THREADS = "distcp.liststatus.threads";
public static final String CONF_LABEL_MAX_MAPS = "distcp.max.maps";
public static final String CONF_LABEL_SOURCE_LISTING = "distcp.source.listing";
public static final String CONF_LABEL_COPY_STRATEGY = "distcp.copy.strategy";
public static final String CONF_LABEL_SKIP_CRC = "distcp.skip.crc";
public static final String CONF_LABEL_OVERWRITE = "distcp.copy.overwrite";
public static final String CONF_LABEL_APPEND = "distcp.copy.append";
public static final String CONF_LABEL_DIFF = "distcp.copy.diff";
public static final String CONF_LABEL_BANDWIDTH_MB = "distcp.map.bandwidth.mb";
public static final String CONF_LABEL_FILTERS_FILE =
"distcp.filters.file";
public static final String CONF_LABEL_MAX_CHUNKS_TOLERABLE =
"distcp.dynamic.max.chunks.tolerable";
public static final String CONF_LABEL_MAX_CHUNKS_IDEAL =
"distcp.dynamic.max.chunks.ideal";
public static final String CONF_LABEL_MIN_RECORDS_PER_CHUNK =
"distcp.dynamic.min.records_per_chunk";
public static final String CONF_LABEL_SPLIT_RATIO =
"distcp.dynamic.split.ratio";
/* Total bytes to be copied. Updated by copylisting. Unfiltered count */
public static final String CONF_LABEL_TOTAL_BYTES_TO_BE_COPIED = "mapred.total.bytes.expected";
/* Total number of paths to copy, includes directories. Unfiltered count */
public static final String CONF_LABEL_TOTAL_NUMBER_OF_RECORDS = "mapred.number.of.records";
/* SSL keystore resource */
public static final String CONF_LABEL_SSL_KEYSTORE = "dfs.https.client.keystore.resource";
  /* If the input is specified via -f <source listing>, the file containing the src paths */
public static final String CONF_LABEL_LISTING_FILE_PATH = "distcp.listing.file.path";
/* Directory where the mapreduce job will write to. If not atomic commit, then same
as CONF_LABEL_TARGET_FINAL_PATH
*/
public static final String CONF_LABEL_TARGET_WORK_PATH = "distcp.target.work.path";
/* Directory where the final data will be committed to. If not atomic commit, then same
as CONF_LABEL_TARGET_WORK_PATH
*/
public static final String CONF_LABEL_TARGET_FINAL_PATH = "distcp.target.final.path";
/* Boolean to indicate whether the target of distcp exists. */
public static final String CONF_LABEL_TARGET_PATH_EXISTS = "distcp.target.path.exists";
/**
   * DistCp job id for consumers of DistCp
*/
public static final String CONF_LABEL_DISTCP_JOB_ID = "distcp.job.id";
/* Meta folder where the job's intermediate data is kept */
public static final String CONF_LABEL_META_FOLDER = "distcp.meta.folder";
/* DistCp CopyListing class override param */
public static final String CONF_LABEL_COPY_LISTING_CLASS = "distcp.copy.listing.class";
/**
* Conf label for SSL Trust-store location.
*/
public static final String CONF_LABEL_SSL_TRUST_STORE_LOCATION
= "ssl.client.truststore.location";
/**
* Conf label for SSL Key-store location.
*/
public static final String CONF_LABEL_SSL_KEY_STORE_LOCATION
= "ssl.client.keystore.location";
/**
* Constants for DistCp return code to shell / consumer of ToolRunner's run
*/
public static final int SUCCESS = 0;
public static final int INVALID_ARGUMENT = -1;
public static final int DUPLICATE_INPUT = -2;
public static final int ACLS_NOT_SUPPORTED = -3;
public static final int XATTRS_NOT_SUPPORTED = -4;
public static final int UNKNOWN_ERROR = -999;
/**
* Constants for DistCp default values of configurable values
*/
public static final int MAX_CHUNKS_TOLERABLE_DEFAULT = 400;
public static final int MAX_CHUNKS_IDEAL_DEFAULT = 100;
public static final int MIN_RECORDS_PER_CHUNK_DEFAULT = 5;
public static final int SPLIT_RATIO_DEFAULT = 2;
/**
* Value of reserved raw HDFS directory when copying raw.* xattrs.
*/
static final String HDFS_RESERVED_RAW_DIRECTORY_NAME = "/.reserved/raw";
static final String HDFS_DISTCP_DIFF_DIRECTORY_NAME = ".distcp.diff.tmp";
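  /*
   * Illustrative usage sketch (not part of the original class): these labels are plain
   * Configuration keys, so callers set and read them directly, e.g. as the
   * DynamicInputFormat tests do:
   *
   *   Configuration conf = new Configuration();
   *   conf.setInt(DistCpConstants.CONF_LABEL_SPLIT_RATIO, 53);
   *   int ratio = conf.getInt(DistCpConstants.CONF_LABEL_SPLIT_RATIO,
   *       DistCpConstants.SPLIT_RATIO_DEFAULT);
   */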
}
| 6,293 | 42.109589 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.commons.cli.Option;
import org.apache.hadoop.conf.Configuration;
/**
* Enumeration mapping configuration keys to distcp command line
* options.
*/
public enum DistCpOptionSwitch {
/**
* Ignores any failures during copy, and continues with rest.
* Logs failures in a file
*/
IGNORE_FAILURES(DistCpConstants.CONF_LABEL_IGNORE_FAILURES,
new Option("i", false, "Ignore failures during copy")),
/**
* Preserves status of file/path in the target.
* Default behavior with -p, is to preserve replication,
* block size, user, group, permission, checksum type and timestamps on the
* target file. Note that when preserving checksum type, block size is also
* preserved.
*
* If any of the optional switches are present among rbugpcaxt, then
* only the corresponding file attribute is preserved.
*/
PRESERVE_STATUS(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,
new Option("p", true, "preserve status (rbugpcaxt)(replication, " +
"block-size, user, group, permission, checksum-type, ACL, XATTR, " +
"timestamps). If -p is specified with no <arg>, then preserves " +
"replication, block size, user, group, permission, checksum type " +
"and timestamps. " +
"raw.* xattrs are preserved when both the source and destination " +
"paths are in the /.reserved/raw hierarchy (HDFS only). raw.* xattr" +
"preservation is independent of the -p flag. " +
"Refer to the DistCp documentation for more details.")),
/**
* Update target location by copying only files that are missing
* in the target. This can be used to periodically sync two folders
* across source and target. Typically used with DELETE_MISSING
* Incompatible with ATOMIC_COMMIT
*/
SYNC_FOLDERS(DistCpConstants.CONF_LABEL_SYNC_FOLDERS,
new Option("update", false, "Update target, copying only missing" +
"files or directories")),
/**
   * Deletes files in the target that are missing from the source.
* This allows the target to be in sync with the source contents
* Typically used in conjunction with SYNC_FOLDERS
* Incompatible with ATOMIC_COMMIT
*/
DELETE_MISSING(DistCpConstants.CONF_LABEL_DELETE_MISSING,
new Option("delete", false, "Delete from target, " +
"files missing in source")),
/**
* Configuration file to use with hftps:// for securely copying
* files across clusters. Typically the configuration file contains
* truststore/keystore information such as location, password and type
*/
SSL_CONF(DistCpConstants.CONF_LABEL_SSL_CONF,
new Option("mapredSslConf", true, "Configuration for ssl config file" +
", to use with hftps://")),
/**
* Number of threads for building source file listing (before map-reduce
* phase, max one listStatus per thread at a time).
*/
NUM_LISTSTATUS_THREADS(DistCpConstants.CONF_LABEL_LISTSTATUS_THREADS,
new Option("numListstatusThreads", true, "Number of threads to " +
"use for building file listing (max " +
DistCpOptions.maxNumListstatusThreads + ").")),
/**
* Max number of maps to use during copy. DistCp will split work
* as equally as possible among these maps
*/
MAX_MAPS(DistCpConstants.CONF_LABEL_MAX_MAPS,
new Option("m", true, "Max number of concurrent maps to use for copy")),
/**
* Source file listing can be provided to DistCp in a file.
   * This allows DistCp to copy an arbitrary list of files from the
   * source to the target.
*/
SOURCE_FILE_LISTING(DistCpConstants.CONF_LABEL_SOURCE_LISTING,
new Option("f", true, "List of files that need to be copied")),
/**
* Copy all the source files and commit them atomically to the target
* This is typically useful in cases where there is a process
* polling for availability of a file/dir. This option is incompatible
* with SYNC_FOLDERS and DELETE_MISSING
*/
ATOMIC_COMMIT(DistCpConstants.CONF_LABEL_ATOMIC_COPY,
new Option("atomic", false, "Commit all changes or none")),
/**
   * Work path to be used only in conjunction with atomic commit
*/
WORK_PATH(DistCpConstants.CONF_LABEL_WORK_PATH,
new Option("tmp", true, "Intermediate work path to be used for atomic commit")),
/**
* Log path where distcp output logs are written to
*/
LOG_PATH(DistCpConstants.CONF_LABEL_LOG_PATH,
new Option("log", true, "Folder on DFS where distcp execution logs are saved")),
/**
   * Copy strategy to use. This could be dynamic or uniform size etc.
* DistCp would use an appropriate input format based on this.
*/
COPY_STRATEGY(DistCpConstants.CONF_LABEL_COPY_STRATEGY,
new Option("strategy", true, "Copy strategy to use. Default is " +
"dividing work based on file sizes")),
/**
* Skip CRC checks between source and target, when determining what
* files need to be copied.
*/
SKIP_CRC(DistCpConstants.CONF_LABEL_SKIP_CRC,
new Option("skipcrccheck", false, "Whether to skip CRC checks between " +
"source and target paths.")),
/**
* Overwrite target-files unconditionally.
*/
OVERWRITE(DistCpConstants.CONF_LABEL_OVERWRITE,
new Option("overwrite", false, "Choose to overwrite target files " +
"unconditionally, even if they exist.")),
APPEND(DistCpConstants.CONF_LABEL_APPEND,
new Option("append", false,
"Reuse existing data in target files and append new data to them if possible")),
DIFF(DistCpConstants.CONF_LABEL_DIFF,
new Option("diff", false,
"Use snapshot diff report to identify the difference between source and target"),
2),
/**
   * Should DistCp execution be blocking
*/
BLOCKING("",
new Option("async", false, "Should distcp execution be blocking")),
FILE_LIMIT("",
new Option("filelimit", true, "(Deprecated!) Limit number of files " +
"copied to <= n")),
SIZE_LIMIT("",
new Option("sizelimit", true, "(Deprecated!) Limit number of files " +
"copied to <= n bytes")),
/**
* Specify bandwidth per map in MB
*/
BANDWIDTH(DistCpConstants.CONF_LABEL_BANDWIDTH_MB,
new Option("bandwidth", true, "Specify bandwidth per map in MB")),
/**
* Path containing a list of strings, which when found in the path of
* a file to be copied excludes that file from the copy job.
*/
FILTERS(DistCpConstants.CONF_LABEL_FILTERS_FILE,
new Option("filters", true, "The path to a file containing a list of"
+ " strings for paths to be excluded from the copy."));
public static final String PRESERVE_STATUS_DEFAULT = "-prbugpct";
private final String confLabel;
private final Option option;
DistCpOptionSwitch(String confLabel, Option option) {
this.confLabel = confLabel;
this.option = option;
}
DistCpOptionSwitch(String confLabel, Option option, int argNum) {
this(confLabel, option);
this.option.setArgs(argNum);
}
/**
* Get Configuration label for the option
* @return configuration label name
*/
public String getConfigLabel() {
return confLabel;
}
/**
* Get CLI Option corresponding to the distcp option
* @return option
*/
public Option getOption() {
return option;
}
/**
* Get Switch symbol
* @return switch symbol char
*/
public String getSwitch() {
return option.getOpt();
}
@Override
public String toString() {
return super.name() + " {" +
"confLabel='" + confLabel + '\'' +
", option=" + option + '}';
}
/**
* Helper function to add an option to hadoop configuration object
* @param conf - Configuration object to include the option
* @param option - Option to add
* @param value - Value
*/
public static void addToConf(Configuration conf,
DistCpOptionSwitch option,
String value) {
conf.set(option.getConfigLabel(), value);
}
/**
* Helper function to set an option to hadoop configuration object
* @param conf - Configuration object to include the option
* @param option - Option to add
*/
public static void addToConf(Configuration conf,
DistCpOptionSwitch option) {
conf.set(option.getConfigLabel(), "true");
}
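  /*
   * Minimal usage sketch (assumed, for illustration only): callers typically mirror a
   * parsed command-line switch into the job Configuration, e.g.
   *
   *   DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.SYNC_FOLDERS);
   *   DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.BANDWIDTH, "10");
   *
   * which sets "distcp.sync.folders" to "true" and "distcp.map.bandwidth.mb" to "10".
   */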
}
| 9,177 | 34.573643 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
import org.apache.hadoop.tools.util.DistCpUtils;
import org.apache.hadoop.tools.util.ProducerConsumer;
import org.apache.hadoop.tools.util.WorkReport;
import org.apache.hadoop.tools.util.WorkRequest;
import org.apache.hadoop.tools.util.WorkRequestProcessor;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.security.Credentials;
import com.google.common.annotations.VisibleForTesting;
import java.io.*;
import java.util.ArrayList;
import static org.apache.hadoop.tools.DistCpConstants
.HDFS_RESERVED_RAW_DIRECTORY_NAME;
/**
* The SimpleCopyListing is responsible for making the exhaustive list of
* all files/directories under its specified list of input-paths.
* These are written into the specified copy-listing file.
* Note: The SimpleCopyListing doesn't handle wild-cards in the input-paths.
*/
public class SimpleCopyListing extends CopyListing {
private static final Log LOG = LogFactory.getLog(SimpleCopyListing.class);
private long totalPaths = 0;
private long totalDirs = 0;
private long totalBytesToCopy = 0;
private int numListstatusThreads = 1;
private final int maxRetries = 3;
private CopyFilter copyFilter;
/**
* Protected constructor, to initialize configuration.
*
* @param configuration The input configuration, with which the source/target FileSystems may be accessed.
* @param credentials - Credentials object on which the FS delegation tokens are cached. If null
* delegation token caching is skipped
*/
protected SimpleCopyListing(Configuration configuration, Credentials credentials) {
super(configuration, credentials);
numListstatusThreads = getConf().getInt(
DistCpConstants.CONF_LABEL_LISTSTATUS_THREADS,
DistCpConstants.DEFAULT_LISTSTATUS_THREADS);
copyFilter = CopyFilter.getCopyFilter(getConf());
copyFilter.initialize();
}
@VisibleForTesting
protected SimpleCopyListing(Configuration configuration, Credentials credentials,
int numListstatusThreads) {
super(configuration, credentials);
this.numListstatusThreads = numListstatusThreads;
}
@Override
protected void validatePaths(DistCpOptions options)
throws IOException, InvalidInputException {
Path targetPath = options.getTargetPath();
FileSystem targetFS = targetPath.getFileSystem(getConf());
boolean targetIsFile = targetFS.isFile(targetPath);
targetPath = targetFS.makeQualified(targetPath);
final boolean targetIsReservedRaw =
Path.getPathWithoutSchemeAndAuthority(targetPath).toString().
startsWith(HDFS_RESERVED_RAW_DIRECTORY_NAME);
//If target is a file, then source has to be single file
if (targetIsFile) {
if (options.getSourcePaths().size() > 1) {
throw new InvalidInputException("Multiple source being copied to a file: " +
targetPath);
}
Path srcPath = options.getSourcePaths().get(0);
FileSystem sourceFS = srcPath.getFileSystem(getConf());
if (!sourceFS.isFile(srcPath)) {
throw new InvalidInputException("Cannot copy " + srcPath +
", which is not a file to " + targetPath);
}
}
if (options.shouldAtomicCommit() && targetFS.exists(targetPath)) {
throw new InvalidInputException("Target path for atomic-commit already exists: " +
targetPath + ". Cannot atomic-commit to pre-existing target-path.");
}
for (Path path: options.getSourcePaths()) {
FileSystem fs = path.getFileSystem(getConf());
if (!fs.exists(path)) {
throw new InvalidInputException(path + " doesn't exist");
}
if (Path.getPathWithoutSchemeAndAuthority(path).toString().
startsWith(HDFS_RESERVED_RAW_DIRECTORY_NAME)) {
if (!targetIsReservedRaw) {
final String msg = "The source path '" + path + "' starts with " +
HDFS_RESERVED_RAW_DIRECTORY_NAME + " but the target path '" +
targetPath + "' does not. Either all or none of the paths must " +
"have this prefix.";
throw new InvalidInputException(msg);
}
} else if (targetIsReservedRaw) {
final String msg = "The target path '" + targetPath + "' starts with " +
HDFS_RESERVED_RAW_DIRECTORY_NAME + " but the source path '" +
path + "' does not. Either all or none of the paths must " +
"have this prefix.";
throw new InvalidInputException(msg);
}
}
if (targetIsReservedRaw) {
options.preserveRawXattrs();
getConf().setBoolean(DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, true);
}
/* This is required to allow map tasks to access each of the source
clusters. This would retrieve the delegation token for each unique
file system and add them to job's private credential store
*/
Credentials credentials = getCredentials();
if (credentials != null) {
Path[] inputPaths = options.getSourcePaths().toArray(new Path[1]);
TokenCache.obtainTokensForNamenodes(credentials, inputPaths, getConf());
}
}
/** {@inheritDoc} */
@Override
public void doBuildListing(Path pathToListingFile, DistCpOptions options) throws IOException {
doBuildListing(getWriter(pathToListingFile), options);
}
/**
* Collect the list of
* {@literal <sourceRelativePath, sourceFileStatus>}
* to be copied and write to the sequence file. In essence, any file or
* directory that need to be copied or sync-ed is written as an entry to the
* sequence file, with the possible exception of the source root:
* when either -update (sync) or -overwrite switch is specified, and if
* the source root is a directory, then the source root entry is not
* written to the sequence file, because only the contents of the source
* directory need to be copied in this case.
* See {@link org.apache.hadoop.tools.util.DistCpUtils#getRelativePath} for
* how relative path is computed.
* See computeSourceRootPath method for how the root path of the source is
* computed.
* @param fileListWriter
* @param options
* @throws IOException
*/
@VisibleForTesting
public void doBuildListing(SequenceFile.Writer fileListWriter,
DistCpOptions options) throws IOException {
if (options.getNumListstatusThreads() > 0) {
numListstatusThreads = options.getNumListstatusThreads();
}
try {
for (Path path: options.getSourcePaths()) {
FileSystem sourceFS = path.getFileSystem(getConf());
final boolean preserveAcls = options.shouldPreserve(FileAttribute.ACL);
final boolean preserveXAttrs = options.shouldPreserve(FileAttribute.XATTR);
final boolean preserveRawXAttrs = options.shouldPreserveRawXattrs();
path = makeQualified(path);
FileStatus rootStatus = sourceFS.getFileStatus(path);
Path sourcePathRoot = computeSourceRootPath(rootStatus, options);
FileStatus[] sourceFiles = sourceFS.listStatus(path);
boolean explore = (sourceFiles != null && sourceFiles.length > 0);
if (!explore || rootStatus.isDirectory()) {
CopyListingFileStatus rootCopyListingStatus =
DistCpUtils.toCopyListingFileStatus(sourceFS, rootStatus,
preserveAcls, preserveXAttrs, preserveRawXAttrs);
writeToFileListingRoot(fileListWriter, rootCopyListingStatus,
sourcePathRoot, options);
}
if (explore) {
ArrayList<FileStatus> sourceDirs = new ArrayList<FileStatus>();
for (FileStatus sourceStatus: sourceFiles) {
if (LOG.isDebugEnabled()) {
LOG.debug("Recording source-path: " + sourceStatus.getPath() + " for copy.");
}
CopyListingFileStatus sourceCopyListingStatus =
DistCpUtils.toCopyListingFileStatus(sourceFS, sourceStatus,
preserveAcls && sourceStatus.isDirectory(),
preserveXAttrs && sourceStatus.isDirectory(),
preserveRawXAttrs && sourceStatus.isDirectory());
writeToFileListing(fileListWriter, sourceCopyListingStatus,
sourcePathRoot);
if (sourceStatus.isDirectory()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Adding source dir for traverse: " + sourceStatus.getPath());
}
sourceDirs.add(sourceStatus);
}
}
traverseDirectory(fileListWriter, sourceFS, sourceDirs,
sourcePathRoot, options);
}
}
fileListWriter.close();
printStats();
LOG.info("Build file listing completed.");
fileListWriter = null;
} finally {
IOUtils.cleanup(LOG, fileListWriter);
}
}
private Path computeSourceRootPath(FileStatus sourceStatus,
DistCpOptions options) throws IOException {
Path target = options.getTargetPath();
FileSystem targetFS = target.getFileSystem(getConf());
final boolean targetPathExists = options.getTargetPathExists();
boolean solitaryFile = options.getSourcePaths().size() == 1
&& !sourceStatus.isDirectory();
if (solitaryFile) {
if (targetFS.isFile(target) || !targetPathExists) {
return sourceStatus.getPath();
} else {
return sourceStatus.getPath().getParent();
}
} else {
boolean specialHandling = (options.getSourcePaths().size() == 1 && !targetPathExists) ||
options.shouldSyncFolder() || options.shouldOverwrite();
return specialHandling && sourceStatus.isDirectory() ? sourceStatus.getPath() :
sourceStatus.getPath().getParent();
}
}
/**
* Provide an option to skip copying a path. Allows for exclusion
* of files such as {@link org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter#SUCCEEDED_FILE_NAME}
* @param path - Path being considered for copy while building the file listing
* @return - True if the path should be considered for copy, false otherwise
*/
protected boolean shouldCopy(Path path) {
return copyFilter.shouldCopy(path);
}
/** {@inheritDoc} */
@Override
protected long getBytesToCopy() {
return totalBytesToCopy;
}
/** {@inheritDoc} */
@Override
protected long getNumberOfPaths() {
return totalPaths;
}
private Path makeQualified(Path path) throws IOException {
final FileSystem fs = path.getFileSystem(getConf());
return path.makeQualified(fs.getUri(), fs.getWorkingDirectory());
}
private SequenceFile.Writer getWriter(Path pathToListFile) throws IOException {
FileSystem fs = pathToListFile.getFileSystem(getConf());
if (fs.exists(pathToListFile)) {
fs.delete(pathToListFile, false);
}
return SequenceFile.createWriter(getConf(),
SequenceFile.Writer.file(pathToListFile),
SequenceFile.Writer.keyClass(Text.class),
SequenceFile.Writer.valueClass(CopyListingFileStatus.class),
SequenceFile.Writer.compression(SequenceFile.CompressionType.NONE));
}
/*
* Private class to implement WorkRequestProcessor interface. It processes
* each directory (represented by FileStatus item) and returns a list of all
* file-system objects in that directory (files and directories). In case of
* retriable exceptions it increments retry counter and returns the same
* directory for later retry.
*/
private static class FileStatusProcessor
implements WorkRequestProcessor<FileStatus, FileStatus[]> {
private FileSystem fileSystem;
public FileStatusProcessor(FileSystem fileSystem) {
this.fileSystem = fileSystem;
}
/*
* Processor for FileSystem.listStatus().
*
* @param workRequest Input work item that contains FileStatus item which
* is a parent directory we want to list.
* @return Outputs WorkReport<FileStatus[]> with a list of objects in the
* directory (array of objects, empty if parent directory is
* empty). In case of an intermittent exception we increment the retry
* counter and return a list containing only the parent directory.
*/
public WorkReport<FileStatus[]> processItem(
WorkRequest<FileStatus> workRequest) {
FileStatus parent = workRequest.getItem();
int retry = workRequest.getRetry();
WorkReport<FileStatus[]> result = null;
try {
if (retry > 0) {
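// Exponential backoff before a retry: sleep 2^retry seconds (2s, 4s, 8s, ...).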
int sleepSeconds = 2;
for (int i = 1; i < retry; i++) {
sleepSeconds *= 2;
}
try {
Thread.sleep(1000 * sleepSeconds);
} catch (InterruptedException ie) {
LOG.debug("Interrupted while sleeping in exponential backoff.");
}
}
result = new WorkReport<FileStatus[]>(
fileSystem.listStatus(parent.getPath()), retry, true);
} catch (FileNotFoundException fnf) {
LOG.error("FileNotFoundException exception in listStatus: " +
fnf.getMessage());
result = new WorkReport<FileStatus[]>(new FileStatus[0], retry, true,
fnf);
} catch (Exception e) {
LOG.error("Exception in listStatus. Will send for retry.");
FileStatus[] parentList = new FileStatus[1];
parentList[0] = parent;
result = new WorkReport<FileStatus[]>(parentList, retry + 1, false, e);
}
return result;
}
}
private void printStats() {
LOG.info("Paths (files+dirs) cnt = " + totalPaths +
"; dirCnt = " + totalDirs);
}
private void maybePrintStats() {
if (totalPaths % 100000 == 0) {
printStats();
}
}
private void traverseDirectory(SequenceFile.Writer fileListWriter,
FileSystem sourceFS,
ArrayList<FileStatus> sourceDirs,
Path sourcePathRoot,
DistCpOptions options)
throws IOException {
final boolean preserveAcls = options.shouldPreserve(FileAttribute.ACL);
final boolean preserveXAttrs = options.shouldPreserve(FileAttribute.XATTR);
final boolean preserveRawXattrs = options.shouldPreserveRawXattrs();
assert numListstatusThreads > 0;
LOG.debug("Starting thread pool of " + numListstatusThreads +
" listStatus workers.");
ProducerConsumer<FileStatus, FileStatus[]> workers =
new ProducerConsumer<FileStatus, FileStatus[]>(numListstatusThreads);
for (int i = 0; i < numListstatusThreads; i++) {
workers.addWorker(
new FileStatusProcessor(sourcePathRoot.getFileSystem(getConf())));
}
for (FileStatus status : sourceDirs) {
workers.put(new WorkRequest<FileStatus>(status, 0));
}
while (workers.hasWork()) {
try {
WorkReport<FileStatus[]> workResult = workers.take();
int retry = workResult.getRetry();
for (FileStatus child: workResult.getItem()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Recording source-path: " + child.getPath() + " for copy.");
}
if (workResult.getSuccess()) {
CopyListingFileStatus childCopyListingStatus =
DistCpUtils.toCopyListingFileStatus(sourceFS, child,
preserveAcls && child.isDirectory(),
preserveXAttrs && child.isDirectory(),
preserveRawXattrs && child.isDirectory());
writeToFileListing(fileListWriter, childCopyListingStatus,
sourcePathRoot);
}
if (retry < maxRetries) {
if (child.isDirectory()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Traversing into source dir: " + child.getPath());
}
workers.put(new WorkRequest<FileStatus>(child, retry));
}
} else {
LOG.error("Giving up on " + child.getPath() +
" after " + retry + " retries.");
}
}
} catch (InterruptedException ie) {
LOG.error("Could not get item from childQueue. Retrying...");
}
}
workers.shutdown();
}
private void writeToFileListingRoot(SequenceFile.Writer fileListWriter,
CopyListingFileStatus fileStatus, Path sourcePathRoot,
DistCpOptions options) throws IOException {
boolean syncOrOverwrite = options.shouldSyncFolder() ||
options.shouldOverwrite();
if (fileStatus.getPath().equals(sourcePathRoot) &&
fileStatus.isDirectory() && syncOrOverwrite) {
// Skip the root-paths when syncOrOverwrite
if (LOG.isDebugEnabled()) {
LOG.debug("Skip " + fileStatus.getPath());
}
return;
}
writeToFileListing(fileListWriter, fileStatus, sourcePathRoot);
}
private void writeToFileListing(SequenceFile.Writer fileListWriter,
CopyListingFileStatus fileStatus,
Path sourcePathRoot) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("REL PATH: " + DistCpUtils.getRelativePath(sourcePathRoot,
fileStatus.getPath()) + ", FULL PATH: " + fileStatus.getPath());
}
if (!shouldCopy(fileStatus.getPath())) {
return;
}
fileListWriter.append(new Text(DistCpUtils.getRelativePath(sourcePathRoot,
fileStatus.getPath())), fileStatus);
fileListWriter.sync();
if (!fileStatus.isDirectory()) {
totalBytesToCopy += fileStatus.getLen();
} else {
totalDirs++;
}
totalPaths++;
maybePrintStats();
}
}
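/**
 * Hypothetical usage sketch, not part of the Hadoop sources: builds a copy
 * listing for one source directory. The listing file becomes a SequenceFile
 * of <relative path, CopyListingFileStatus> entries as described above. All
 * paths here are illustrative assumptions.
 */
class SimpleCopyListingUsageSketch {
  static void sketch(Configuration conf) throws IOException {
    DistCpOptions options = new DistCpOptions(
        java.util.Arrays.asList(new Path("hdfs://nn1/src/data")),
        new Path("hdfs://nn2/dst/data"));
    // A null Credentials object would skip delegation-token caching.
    CopyListing listing = new SimpleCopyListing(conf, new Credentials());
    listing.buildListing(new Path("hdfs://nn2/tmp/fileList.seq"), options);
  }
}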
| 19,036 | 38.993697 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.util.DistCpUtils;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
/**
* The Options class encapsulates all DistCp options.
* These may be set from command-line (via the OptionsParser)
* or may be set manually.
*/
public class DistCpOptions {
private boolean atomicCommit = false;
private boolean syncFolder = false;
private boolean deleteMissing = false;
private boolean ignoreFailures = false;
private boolean overwrite = false;
private boolean append = false;
private boolean skipCRC = false;
private boolean blocking = true;
private boolean useDiff = false;
public static final int maxNumListstatusThreads = 40;
private int numListstatusThreads = 0; // Indicates that flag is not set.
private int maxMaps = DistCpConstants.DEFAULT_MAPS;
private int mapBandwidth = DistCpConstants.DEFAULT_BANDWIDTH_MB;
private String sslConfigurationFile;
private String copyStrategy = DistCpConstants.UNIFORMSIZE;
private EnumSet<FileAttribute> preserveStatus = EnumSet.noneOf(FileAttribute.class);
private boolean preserveRawXattrs;
private Path atomicWorkPath;
private Path logPath;
private Path sourceFileListing;
private List<Path> sourcePaths;
private String fromSnapshot;
private String toSnapshot;
private Path targetPath;
/**
* The path to a file containing a list of paths to filter out of the copy.
*/
private String filtersFile;
// targetPathExists is a derived field; it's initialized at the
// beginning of distcp.
private boolean targetPathExists = true;
public static enum FileAttribute{
REPLICATION, BLOCKSIZE, USER, GROUP, PERMISSION, CHECKSUMTYPE, ACL, XATTR, TIMES;
public static FileAttribute getAttribute(char symbol) {
for (FileAttribute attribute : values()) {
if (attribute.name().charAt(0) == Character.toUpperCase(symbol)) {
return attribute;
}
}
throw new NoSuchElementException("No attribute for " + symbol);
}
}
/**
* Constructor, to initialize source/target paths.
* @param sourcePaths List of source-paths (including wildcards)
* to be copied to target.
* @param targetPath Destination path for the dist-copy.
*/
public DistCpOptions(List<Path> sourcePaths, Path targetPath) {
assert sourcePaths != null && !sourcePaths.isEmpty() : "Invalid source paths";
assert targetPath != null : "Invalid Target path";
this.sourcePaths = sourcePaths;
this.targetPath = targetPath;
}
/**
* Constructor, to initialize source/target paths.
* @param sourceFileListing File containing list of source paths
* @param targetPath Destination path for the dist-copy.
*/
public DistCpOptions(Path sourceFileListing, Path targetPath) {
assert sourceFileListing != null : "Invalid source paths";
assert targetPath != null : "Invalid Target path";
this.sourceFileListing = sourceFileListing;
this.targetPath = targetPath;
}
/**
* Copy constructor.
* @param that DistCpOptions being copied from.
*/
public DistCpOptions(DistCpOptions that) {
if (this != that && that != null) {
this.atomicCommit = that.atomicCommit;
this.syncFolder = that.syncFolder;
this.deleteMissing = that.deleteMissing;
this.ignoreFailures = that.ignoreFailures;
this.overwrite = that.overwrite;
this.skipCRC = that.skipCRC;
this.blocking = that.blocking;
this.numListstatusThreads = that.numListstatusThreads;
this.maxMaps = that.maxMaps;
this.mapBandwidth = that.mapBandwidth;
this.sslConfigurationFile = that.getSslConfigurationFile();
this.copyStrategy = that.copyStrategy;
this.preserveStatus = that.preserveStatus;
this.preserveRawXattrs = that.preserveRawXattrs;
this.atomicWorkPath = that.getAtomicWorkPath();
this.logPath = that.getLogPath();
this.sourceFileListing = that.getSourceFileListing();
this.sourcePaths = that.getSourcePaths();
this.targetPath = that.getTargetPath();
this.targetPathExists = that.getTargetPathExists();
this.filtersFile = that.getFiltersFile();
}
}
/**
* Should the data be committed atomically?
*
* @return true if data should be committed atomically. false otherwise
*/
public boolean shouldAtomicCommit() {
return atomicCommit;
}
/**
* Set if data needs to be committed atomically
*
* @param atomicCommit - boolean switch
*/
public void setAtomicCommit(boolean atomicCommit) {
validate(DistCpOptionSwitch.ATOMIC_COMMIT, atomicCommit);
this.atomicCommit = atomicCommit;
}
/**
* Should the data be sync'ed between source and target paths?
*
* @return true if data should be sync'ed up. false otherwise
*/
public boolean shouldSyncFolder() {
return syncFolder;
}
/**
* Set if source and target folder contents be sync'ed up
*
* @param syncFolder - boolean switch
*/
public void setSyncFolder(boolean syncFolder) {
validate(DistCpOptionSwitch.SYNC_FOLDERS, syncFolder);
this.syncFolder = syncFolder;
}
/**
* Should target files missing in source be deleted?
*
* @return true if zombie target files are to be removed. false otherwise
*/
public boolean shouldDeleteMissing() {
return deleteMissing;
}
/**
* Set if files only present in target should be deleted
*
* @param deleteMissing - boolean switch
*/
public void setDeleteMissing(boolean deleteMissing) {
validate(DistCpOptionSwitch.DELETE_MISSING, deleteMissing);
this.deleteMissing = deleteMissing;
}
/**
* Should failures be logged and ignored during copy?
*
* @return true if failures are to be logged and ignored. false otherwise
*/
public boolean shouldIgnoreFailures() {
return ignoreFailures;
}
/**
* Set if failures during copy be ignored
*
* @param ignoreFailures - boolean switch
*/
public void setIgnoreFailures(boolean ignoreFailures) {
this.ignoreFailures = ignoreFailures;
}
/**
* Should DistCp be running in blocking mode
*
* @return true if should run in blocking, false otherwise
*/
public boolean shouldBlock() {
return blocking;
}
/**
* Set if DistCp should run in blocking or non-blocking mode
*
* @param blocking - boolean switch
*/
public void setBlocking(boolean blocking) {
this.blocking = blocking;
}
/**
* Should files be overwritten always?
*
* @return true if files in target that may exist before distcp, should always
* be overwritten. false otherwise
*/
public boolean shouldOverwrite() {
return overwrite;
}
/**
* Set if files should always be overwritten on target
*
* @param overwrite - boolean switch
*/
public void setOverwrite(boolean overwrite) {
validate(DistCpOptionSwitch.OVERWRITE, overwrite);
this.overwrite = overwrite;
}
/**
* @return whether we can append new data to target files
*/
public boolean shouldAppend() {
return append;
}
/**
* Set if we want to append new data to target files. This is valid only with
* the update option and when CRC checking is not skipped.
*/
public void setAppend(boolean append) {
validate(DistCpOptionSwitch.APPEND, append);
this.append = append;
}
public boolean shouldUseDiff() {
return this.useDiff;
}
public String getFromSnapshot() {
return this.fromSnapshot;
}
public String getToSnapshot() {
return this.toSnapshot;
}
public void setUseDiff(boolean useDiff, String fromSnapshot, String toSnapshot) {
validate(DistCpOptionSwitch.DIFF, useDiff);
this.useDiff = useDiff;
this.fromSnapshot = fromSnapshot;
this.toSnapshot = toSnapshot;
}
public void disableUsingDiff() {
this.useDiff = false;
}
/**
* Should CRC/checksum check be skipped while checking files are identical
*
* @return true if checksum check should be skipped while checking files are
* identical. false otherwise
*/
public boolean shouldSkipCRC() {
return skipCRC;
}
/**
* Set if checksum comparison should be skipped while determining if
* source and destination files are identical
*
* @param skipCRC - boolean switch
*/
public void setSkipCRC(boolean skipCRC) {
validate(DistCpOptionSwitch.SKIP_CRC, skipCRC);
this.skipCRC = skipCRC;
}
/** Get the number of threads to use for listStatus
*
* @return Number of threads to do listStatus
*/
public int getNumListstatusThreads() {
return numListstatusThreads;
}
/** Set the number of threads to use for listStatus. We allow max 40
* threads. Setting numThreads to zero signifies we should use the value
* from conf properties.
*
* @param numThreads - Number of threads
*/
public void setNumListstatusThreads(int numThreads) {
if (numThreads > maxNumListstatusThreads) {
this.numListstatusThreads = maxNumListstatusThreads;
} else if (numThreads > 0) {
this.numListstatusThreads = numThreads;
} else {
this.numListstatusThreads = 0;
}
}
/** Get the max number of maps to use for this copy
*
* @return Max number of maps
*/
public int getMaxMaps() {
return maxMaps;
}
/**
* Set the max number of maps to use for copy
*
* @param maxMaps - Number of maps
*/
public void setMaxMaps(int maxMaps) {
this.maxMaps = Math.max(maxMaps, 1);
}
/** Get the map bandwidth in MB
*
* @return Bandwidth in MB
*/
public int getMapBandwidth() {
return mapBandwidth;
}
/**
* Set per map bandwidth
*
* @param mapBandwidth - per map bandwidth
*/
public void setMapBandwidth(int mapBandwidth) {
assert mapBandwidth > 0 : "Bandwidth " + mapBandwidth + " is invalid (should be > 0)";
this.mapBandwidth = mapBandwidth;
}
/**
* Get path where the ssl configuration file is present to use for hftps://
*
* @return Path on local file system
*/
public String getSslConfigurationFile() {
return sslConfigurationFile;
}
/**
* Set the SSL configuration file path to use with hftps:// (local path)
*
* @param sslConfigurationFile - Local ssl config file path
*/
public void setSslConfigurationFile(String sslConfigurationFile) {
this.sslConfigurationFile = sslConfigurationFile;
}
/**
* Returns an iterator with the list of file attributes to preserve
*
* @return iterator of file attributes to preserve
*/
public Iterator<FileAttribute> preserveAttributes() {
return preserveStatus.iterator();
}
/**
* Checks if the input attribute should be preserved or not
*
* @param attribute - Attribute to check
* @return True if attribute should be preserved, false otherwise
*/
public boolean shouldPreserve(FileAttribute attribute) {
return preserveStatus.contains(attribute);
}
/**
* Add file attributes that need to be preserved. This method may be
* called multiple times to add attributes.
*
* @param fileAttribute - Attribute to add, one at a time
*/
public void preserve(FileAttribute fileAttribute) {
for (FileAttribute attribute : preserveStatus) {
if (attribute.equals(fileAttribute)) {
return;
}
}
preserveStatus.add(fileAttribute);
}
/**
* Return true if raw.* xattrs should be preserved.
* @return true if raw.* xattrs should be preserved.
*/
public boolean shouldPreserveRawXattrs() {
return preserveRawXattrs;
}
/**
* Indicate that raw.* xattrs should be preserved
*/
public void preserveRawXattrs() {
preserveRawXattrs = true;
}
/** Get work path for atomic commit. If null, the work
* path would be parentOf(targetPath) + "/._WIP_" + nameOf(targetPath)
*
* @return Atomic work path on the target cluster. Null if not set
*/
public Path getAtomicWorkPath() {
return atomicWorkPath;
}
/**
* Set the work path for atomic commit
*
* @param atomicWorkPath - Path on the target cluster
*/
public void setAtomicWorkPath(Path atomicWorkPath) {
this.atomicWorkPath = atomicWorkPath;
}
/** Get output directory for writing distcp logs. Otherwise logs
* are temporarily written to JobStagingDir/_logs and deleted
* upon job completion
*
* @return Log output path on the cluster where distcp job is run
*/
public Path getLogPath() {
return logPath;
}
/**
* Set the log path where distcp output logs are stored
* Uses JobStagingDir/_logs by default
*
* @param logPath - Path where logs will be saved
*/
public void setLogPath(Path logPath) {
this.logPath = logPath;
}
/**
* Get the copy strategy to use. Uses appropriate input format
*
* @return copy strategy to use
*/
public String getCopyStrategy() {
return copyStrategy;
}
/**
* Set the copy strategy to use. Should map to a strategy implementation
* in distcp-default.xml
*
* @param copyStrategy - copy Strategy to use
*/
public void setCopyStrategy(String copyStrategy) {
this.copyStrategy = copyStrategy;
}
/**
* File path (hdfs:// or file://) that contains the list of actual
* files to copy
*
* @return - Source listing file path
*/
public Path getSourceFileListing() {
return sourceFileListing;
}
/**
* Getter for sourcePaths.
* @return List of source-paths.
*/
public List<Path> getSourcePaths() {
return sourcePaths;
}
/**
* Setter for sourcePaths.
* @param sourcePaths The new list of source-paths.
*/
public void setSourcePaths(List<Path> sourcePaths) {
assert sourcePaths != null && sourcePaths.size() != 0;
this.sourcePaths = sourcePaths;
}
/**
* Getter for the targetPath.
* @return The target-path.
*/
public Path getTargetPath() {
return targetPath;
}
/**
* Getter for the targetPathExists.
* @return Whether the target path exists.
*/
public boolean getTargetPathExists() {
return targetPathExists;
}
/**
* Set targetPathExists.
* @param targetPathExists Whether the target path of distcp exists.
*/
public boolean setTargetPathExists(boolean targetPathExists) {
return this.targetPathExists = targetPathExists;
}
/**
* File path that contains the list of patterns
* for paths to be filtered from the file copy.
* @return - Filter file path.
*/
public final String getFiltersFile() {
return filtersFile;
}
/**
* Set filtersFile.
* @param filtersFilename The path to a list of patterns to exclude from copy.
*/
public final void setFiltersFile(String filtersFilename) {
this.filtersFile = filtersFilename;
}
public void validate(DistCpOptionSwitch option, boolean value) {
boolean syncFolder = (option == DistCpOptionSwitch.SYNC_FOLDERS ?
value : this.syncFolder);
boolean overwrite = (option == DistCpOptionSwitch.OVERWRITE ?
value : this.overwrite);
boolean deleteMissing = (option == DistCpOptionSwitch.DELETE_MISSING ?
value : this.deleteMissing);
boolean atomicCommit = (option == DistCpOptionSwitch.ATOMIC_COMMIT ?
value : this.atomicCommit);
boolean skipCRC = (option == DistCpOptionSwitch.SKIP_CRC ?
value : this.skipCRC);
boolean append = (option == DistCpOptionSwitch.APPEND ? value : this.append);
boolean useDiff = (option == DistCpOptionSwitch.DIFF ? value : this.useDiff);
if (syncFolder && atomicCommit) {
throw new IllegalArgumentException("Atomic commit can't be used with " +
"sync folder or overwrite options");
}
if (deleteMissing && !(overwrite || syncFolder)) {
throw new IllegalArgumentException("Delete missing is applicable " +
"only with update or overwrite options");
}
if (overwrite && syncFolder) {
throw new IllegalArgumentException("Overwrite and update options are " +
"mutually exclusive");
}
if (!syncFolder && skipCRC) {
throw new IllegalArgumentException("Skip CRC is valid only with update options");
}
if (!syncFolder && append) {
throw new IllegalArgumentException(
"Append is valid only with update options");
}
if (skipCRC && append) {
throw new IllegalArgumentException(
"Append is disallowed when skipping CRC");
}
if ((!syncFolder || !deleteMissing) && useDiff) {
throw new IllegalArgumentException(
"Diff is valid only with update and delete options");
}
}
/**
* Add options to configuration. These will be used in the Mapper/committer
*
* @param conf - Configuration object to which the options need to be added
*/
public void appendToConf(Configuration conf) {
DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.ATOMIC_COMMIT,
String.valueOf(atomicCommit));
DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.IGNORE_FAILURES,
String.valueOf(ignoreFailures));
DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.SYNC_FOLDERS,
String.valueOf(syncFolder));
DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.DELETE_MISSING,
String.valueOf(deleteMissing));
DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.OVERWRITE,
String.valueOf(overwrite));
DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.APPEND,
String.valueOf(append));
DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.DIFF,
String.valueOf(useDiff));
DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.SKIP_CRC,
String.valueOf(skipCRC));
DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.BANDWIDTH,
String.valueOf(mapBandwidth));
DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.PRESERVE_STATUS,
DistCpUtils.packAttributes(preserveStatus));
if (filtersFile != null) {
DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.FILTERS,
filtersFile);
}
}
/**
* Utility to easily string-ify Options, for logging.
*
* @return String representation of the Options.
*/
@Override
public String toString() {
return "DistCpOptions{" +
"atomicCommit=" + atomicCommit +
", syncFolder=" + syncFolder +
", deleteMissing=" + deleteMissing +
", ignoreFailures=" + ignoreFailures +
", maxMaps=" + maxMaps +
", sslConfigurationFile='" + sslConfigurationFile + '\'' +
", copyStrategy='" + copyStrategy + '\'' +
", sourceFileListing=" + sourceFileListing +
", sourcePaths=" + sourcePaths +
", targetPath=" + targetPath +
", targetPathExists=" + targetPathExists +
", preserveRawXattrs=" + preserveRawXattrs +
", filtersFile='" + filtersFile + '\'' +
'}';
}
@Override
protected DistCpOptions clone() throws CloneNotSupportedException {
return (DistCpOptions) super.clone();
}
}
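/**
 * Hypothetical usage sketch, not part of the Hadoop sources: shows how the
 * setters above enforce the constraints checked in validate(). The paths and
 * the sketch class are illustrative assumptions.
 */
class DistCpOptionsUsageSketch {
  static DistCpOptions sketch() {
    DistCpOptions options = new DistCpOptions(
        java.util.Arrays.asList(new Path("/src")), new Path("/dst"));
    options.setSyncFolder(true);     // -update
    options.setDeleteMissing(true);  // allowed only with -update or -overwrite
    options.setSkipCRC(true);        // allowed only with -update
    options.preserve(DistCpOptions.FileAttribute.PERMISSION);
    // Calling options.setOverwrite(true) here would throw
    // IllegalArgumentException: -overwrite and -update are mutually exclusive.
    return options;
  }
}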
| 19,942 | 28.156433 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/GlobbedCopyListing.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
/**
* GlobbedCopyListing implements the CopyListing interface, to create the copy
* listing-file by "globbing" all specified source paths (wild-cards and all.)
*/
public class GlobbedCopyListing extends CopyListing {
private static final Log LOG = LogFactory.getLog(GlobbedCopyListing.class);
private final CopyListing simpleListing;
/**
* Constructor, to initialize the configuration.
* @param configuration The input Configuration object.
* @param credentials Credentials object on which the FS delegation tokens are cached. If null
* delegation token caching is skipped
*/
public GlobbedCopyListing(Configuration configuration, Credentials credentials) {
super(configuration, credentials);
simpleListing = new SimpleCopyListing(getConf(), credentials);
}
/** {@inheritDoc} */
@Override
protected void validatePaths(DistCpOptions options)
throws IOException, InvalidInputException {
}
/**
* Implementation of CopyListing::buildListing().
* Creates the copy listing by "globbing" all source-paths.
* @param pathToListingFile The location at which the copy-listing file
* is to be created.
* @param options Input Options for DistCp (indicating source/target paths.)
* @throws IOException
*/
@Override
public void doBuildListing(Path pathToListingFile,
DistCpOptions options) throws IOException {
List<Path> globbedPaths = new ArrayList<Path>();
if (options.getSourcePaths().isEmpty()) {
throw new InvalidInputException("Nothing to process. Source paths::EMPTY");
}
for (Path p : options.getSourcePaths()) {
FileSystem fs = p.getFileSystem(getConf());
FileStatus[] inputs = fs.globStatus(p);
if(inputs != null && inputs.length > 0) {
for (FileStatus onePath: inputs) {
globbedPaths.add(onePath.getPath());
}
} else {
throw new InvalidInputException(p + " doesn't exist");
}
}
DistCpOptions optionsGlobbed = new DistCpOptions(options);
optionsGlobbed.setSourcePaths(globbedPaths);
simpleListing.buildListing(pathToListingFile, optionsGlobbed);
}
/** {@inheritDoc} */
@Override
protected long getBytesToCopy() {
return simpleListing.getBytesToCopy();
}
/** {@inheritDoc} */
@Override
protected long getNumberOfPaths() {
return simpleListing.getNumberOfPaths();
}
}
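/**
 * Hypothetical usage sketch, not part of the Hadoop sources: wildcard source
 * paths are expanded via globStatus() before being handed to
 * SimpleCopyListing, and a pattern that matches nothing fails fast with
 * InvalidInputException. Paths are illustrative assumptions.
 */
class GlobbedCopyListingUsageSketch {
  static void sketch(Configuration conf) throws IOException {
    DistCpOptions options = new DistCpOptions(
        java.util.Arrays.asList(new Path("/data/2015-*/logs")),
        new Path("/backup/logs"));
    CopyListing listing = new GlobbedCopyListing(conf, new Credentials());
    listing.buildListing(new Path("/tmp/globbedFileList.seq"), options);
  }
}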
| 3,657 | 33.509434 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/RegexCopyFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.IOUtils;
import java.io.*;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;
import com.google.common.annotations.VisibleForTesting;
/**
* A CopyFilter which compares Java Regex Patterns to each Path to determine
* whether a file should be copied.
*/
public class RegexCopyFilter extends CopyFilter {
private static final Log LOG = LogFactory.getLog(RegexCopyFilter.class);
private File filtersFile;
private List<Pattern> filters;
/**
* Constructor, sets up a File object to read filter patterns from and
* the List to store the patterns.
*/
protected RegexCopyFilter(String filtersFilename) {
filtersFile = new File(filtersFilename);
filters = new ArrayList<>();
}
/**
* Loads a list of filter patterns for use in shouldCopy.
*/
@Override
public void initialize() {
BufferedReader reader = null;
try {
InputStream is = new FileInputStream(filtersFile);
reader = new BufferedReader(new InputStreamReader(is,
Charset.forName("UTF-8")));
String line;
while ((line = reader.readLine()) != null) {
Pattern pattern = Pattern.compile(line);
filters.add(pattern);
}
} catch (FileNotFoundException notFound) {
LOG.error("Can't find filters file " + filtersFile);
} catch (IOException cantRead) {
LOG.error("An error occurred while attempting to read from " +
filtersFile);
} finally {
IOUtils.cleanup(LOG, reader);
}
}
/**
* Sets the list of filters to exclude files from copy.
* Simplifies testing of the filters feature.
*
* @param filtersList a list of Patterns to be excluded
*/
@VisibleForTesting
protected final void setFilters(List<Pattern> filtersList) {
this.filters = filtersList;
}
@Override
public boolean shouldCopy(Path path) {
for (Pattern filter : filters) {
if (filter.matcher(path.toString()).matches()) {
return false;
}
}
return true;
}
}
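/**
 * Hypothetical usage sketch, not part of the Hadoop sources: exercises the
 * regex exclusion logic without reading a filters file by injecting patterns
 * through the test hook above. The patterns and paths are illustrative
 * assumptions.
 */
class RegexCopyFilterUsageSketch {
  static boolean sketch() {
    RegexCopyFilter filter = new RegexCopyFilter("/tmp/unused-filters.txt");
    List<Pattern> patterns = new ArrayList<>();
    patterns.add(Pattern.compile(".*\\.tmp"));     // skip temporary files
    patterns.add(Pattern.compile(".*/_SUCCESS"));  // skip success markers
    filter.setFilters(patterns);
    // Returns false: the path matches the first exclusion pattern.
    return filter.shouldCopy(new Path("/data/part-00000.tmp"));
  }
}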
| 3,030 | 29.616162 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
/**
* This class provides the basic functionality to sync two FileSystems based on
* the snapshot diff report. More specifically, we have the following settings:
* 1. Both the source and target FileSystem must be DistributedFileSystem
* 2. Two snapshots (e.g., s1 and s2) have been created on the source FS.
* The diff between these two snapshots will be copied to the target FS.
* 3. The target has the same snapshot s1. No changes have been made on the
* target since s1. All the files/directories in the target are the same as
* source.s1
*/
class DistCpSync {
static boolean sync(DistCpOptions inputOptions, Configuration conf)
throws IOException {
List<Path> sourcePaths = inputOptions.getSourcePaths();
if (sourcePaths.size() != 1) {
// we only support one source dir which must be a snapshottable directory
throw new IllegalArgumentException(sourcePaths.size()
+ " source paths are provided");
}
final Path sourceDir = sourcePaths.get(0);
final Path targetDir = inputOptions.getTargetPath();
final FileSystem sfs = sourceDir.getFileSystem(conf);
final FileSystem tfs = targetDir.getFileSystem(conf);
// currently we require both the source and the target file system are
// DistributedFileSystem.
if (!(sfs instanceof DistributedFileSystem) ||
!(tfs instanceof DistributedFileSystem)) {
throw new IllegalArgumentException("The FileSystems needs to" +
" be DistributedFileSystem for using snapshot-diff-based distcp");
}
final DistributedFileSystem sourceFs = (DistributedFileSystem) sfs;
final DistributedFileSystem targetFs = (DistributedFileSystem) tfs;
// make sure targetFS has no change between from and the current states
if (!checkNoChange(inputOptions, targetFs, targetDir)) {
// set the source path using the snapshot path
inputOptions.setSourcePaths(Arrays.asList(getSourceSnapshotPath(sourceDir,
inputOptions.getToSnapshot())));
return false;
}
Path tmpDir = null;
try {
tmpDir = createTargetTmpDir(targetFs, targetDir);
DiffInfo[] diffs = getDiffs(inputOptions, sourceFs, sourceDir, targetDir);
if (diffs == null) {
return false;
}
// do the real sync work: deletion and rename
syncDiff(diffs, targetFs, tmpDir);
return true;
} catch (Exception e) {
DistCp.LOG.warn("Failed to use snapshot diff for distcp", e);
return false;
} finally {
deleteTargetTmpDir(targetFs, tmpDir);
// TODO: since we have tmp directory, we can support "undo" with failures
// set the source path using the snapshot path
inputOptions.setSourcePaths(Arrays.asList(getSourceSnapshotPath(sourceDir,
inputOptions.getToSnapshot())));
}
}
private static String getSnapshotName(String name) {
return Path.CUR_DIR.equals(name) ? "" : name;
}
private static Path getSourceSnapshotPath(Path sourceDir, String snapshotName) {
if (Path.CUR_DIR.equals(snapshotName)) {
return sourceDir;
} else {
return new Path(sourceDir,
HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + snapshotName);
}
}
private static Path createTargetTmpDir(DistributedFileSystem targetFs,
Path targetDir) throws IOException {
final Path tmp = new Path(targetDir,
DistCpConstants.HDFS_DISTCP_DIFF_DIRECTORY_NAME + DistCp.rand.nextInt());
if (!targetFs.mkdirs(tmp)) {
throw new IOException("The tmp directory " + tmp + " already exists");
}
return tmp;
}
private static void deleteTargetTmpDir(DistributedFileSystem targetFs,
Path tmpDir) {
try {
if (tmpDir != null) {
targetFs.delete(tmpDir, true);
}
} catch (IOException e) {
DistCp.LOG.error("Unable to cleanup tmp dir: " + tmpDir, e);
}
}
/**
* Compute the snapshot diff on the given file system. Return true if the diff
* is empty, i.e., no changes have happened in the FS.
*/
private static boolean checkNoChange(DistCpOptions inputOptions,
DistributedFileSystem fs, Path path) {
try {
SnapshotDiffReport targetDiff =
fs.getSnapshotDiffReport(path, inputOptions.getFromSnapshot(), "");
if (!targetDiff.getDiffList().isEmpty()) {
DistCp.LOG.warn("The target has been modified since snapshot "
+ inputOptions.getFromSnapshot());
return false;
} else {
return true;
}
} catch (IOException e) {
DistCp.LOG.warn("Failed to compute snapshot diff on " + path, e);
}
return false;
}
@VisibleForTesting
static DiffInfo[] getDiffs(DistCpOptions inputOptions,
DistributedFileSystem fs, Path sourceDir, Path targetDir) {
try {
final String from = getSnapshotName(inputOptions.getFromSnapshot());
final String to = getSnapshotName(inputOptions.getToSnapshot());
SnapshotDiffReport sourceDiff = fs.getSnapshotDiffReport(sourceDir,
from, to);
return DiffInfo.getDiffs(sourceDiff, targetDir);
} catch (IOException e) {
DistCp.LOG.warn("Failed to compute snapshot diff on " + sourceDir, e);
}
return null;
}
private static void syncDiff(DiffInfo[] diffs,
DistributedFileSystem targetFs, Path tmpDir) throws IOException {
moveToTmpDir(diffs, targetFs, tmpDir);
moveToTarget(diffs, targetFs);
}
/**
* Move all the source files that should be renamed or deleted to the tmp
* directory.
*/
private static void moveToTmpDir(DiffInfo[] diffs,
DistributedFileSystem targetFs, Path tmpDir) throws IOException {
// sort the diffs based on their source paths to make sure the files and
// subdirs are moved before moving their parents/ancestors.
Arrays.sort(diffs, DiffInfo.sourceComparator);
Random random = new Random();
for (DiffInfo diff : diffs) {
Path tmpTarget = new Path(tmpDir, diff.source.getName());
while (targetFs.exists(tmpTarget)) {
tmpTarget = new Path(tmpDir, diff.source.getName() + random.nextInt());
}
diff.setTmp(tmpTarget);
targetFs.rename(diff.source, tmpTarget);
}
}
/**
* Finish the rename operations: move all the intermediate files/directories
* from the tmp dir to the final targets.
*/
private static void moveToTarget(DiffInfo[] diffs,
DistributedFileSystem targetFs) throws IOException {
// sort the diffs based on their target paths to make sure the parent
// directories are created first.
Arrays.sort(diffs, DiffInfo.targetComparator);
for (DiffInfo diff : diffs) {
if (diff.target != null) {
if (!targetFs.exists(diff.target.getParent())) {
targetFs.mkdirs(diff.target.getParent());
}
targetFs.rename(diff.getTmp(), diff.target);
}
}
}
}
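/**
 * Hypothetical usage sketch, not part of the Hadoop sources: snapshot-diff
 * based sync needs exactly one snapshottable source directory, the -update
 * and -delete options enabled, and a target unchanged since the "from"
 * snapshot. Snapshot names and paths are illustrative assumptions.
 */
class DistCpSyncUsageSketch {
  static boolean sketch(Configuration conf) throws IOException {
    DistCpOptions options = new DistCpOptions(
        Arrays.asList(new Path("hdfs://nn1/data")),
        new Path("hdfs://nn2/data"));
    options.setSyncFolder(true);
    options.setDeleteMissing(true);
    options.setUseDiff(true, "s1", "s2");  // -diff s1 s2
    // Returns false (caller falls back to a full -update copy) if the diff
    // cannot be applied, e.g. the target changed after snapshot s1.
    return DistCpSync.sync(options, conf);
  }
}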
| 8,131 | 37 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.io.WritableUtils;
import com.google.common.base.Objects;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
/**
* CopyListingFileStatus is a specialized subclass of {@link FileStatus} for
* attaching additional data members useful to distcp. This class does not
* override {@link FileStatus#compareTo}, because the additional data members
* are not relevant to sort order.
*/
@InterfaceAudience.Private
public final class CopyListingFileStatus extends FileStatus {
private static final byte NO_ACL_ENTRIES = -1;
private static final int NO_XATTRS = -1;
// Retain static arrays of enum values to prevent repeated allocation of new
// arrays during deserialization.
private static final AclEntryType[] ACL_ENTRY_TYPES = AclEntryType.values();
private static final AclEntryScope[] ACL_ENTRY_SCOPES = AclEntryScope.values();
private static final FsAction[] FS_ACTIONS = FsAction.values();
private List<AclEntry> aclEntries;
private Map<String, byte[]> xAttrs;
/**
* Default constructor.
*/
public CopyListingFileStatus() {
}
/**
* Creates a new CopyListingFileStatus by copying the members of the given
* FileStatus.
*
* @param fileStatus FileStatus to copy
*/
public CopyListingFileStatus(FileStatus fileStatus) throws IOException {
super(fileStatus);
}
/**
* Returns the full logical ACL.
*
* @return List containing full logical ACL
*/
public List<AclEntry> getAclEntries() {
return AclUtil.getAclFromPermAndEntries(getPermission(),
aclEntries != null ? aclEntries : Collections.<AclEntry>emptyList());
}
/**
* Sets optional ACL entries.
*
* @param aclEntries List containing all ACL entries
*/
public void setAclEntries(List<AclEntry> aclEntries) {
this.aclEntries = aclEntries;
}
/**
* Returns all xAttrs.
*
* @return Map containing all xAttrs
*/
public Map<String, byte[]> getXAttrs() {
return xAttrs != null ? xAttrs : Collections.<String, byte[]>emptyMap();
}
/**
* Sets optional xAttrs.
*
* @param xAttrs Map containing all xAttrs
*/
public void setXAttrs(Map<String, byte[]> xAttrs) {
this.xAttrs = xAttrs;
}
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
if (aclEntries != null) {
// byte is sufficient, because 32 ACL entries is the max enforced by HDFS.
out.writeByte(aclEntries.size());
for (AclEntry entry: aclEntries) {
out.writeByte(entry.getScope().ordinal());
out.writeByte(entry.getType().ordinal());
WritableUtils.writeString(out, entry.getName());
out.writeByte(entry.getPermission().ordinal());
}
} else {
out.writeByte(NO_ACL_ENTRIES);
}
if (xAttrs != null) {
out.writeInt(xAttrs.size());
Iterator<Entry<String, byte[]>> iter = xAttrs.entrySet().iterator();
while (iter.hasNext()) {
Entry<String, byte[]> entry = iter.next();
WritableUtils.writeString(out, entry.getKey());
final byte[] value = entry.getValue();
if (value != null) {
out.writeInt(value.length);
if (value.length > 0) {
out.write(value);
}
} else {
out.writeInt(-1);
}
}
} else {
out.writeInt(NO_XATTRS);
}
}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
byte aclEntriesSize = in.readByte();
if (aclEntriesSize != NO_ACL_ENTRIES) {
aclEntries = Lists.newArrayListWithCapacity(aclEntriesSize);
for (int i = 0; i < aclEntriesSize; ++i) {
aclEntries.add(new AclEntry.Builder()
.setScope(ACL_ENTRY_SCOPES[in.readByte()])
.setType(ACL_ENTRY_TYPES[in.readByte()])
.setName(WritableUtils.readString(in))
.setPermission(FS_ACTIONS[in.readByte()])
.build());
}
} else {
aclEntries = null;
}
int xAttrsSize = in.readInt();
if (xAttrsSize != NO_XATTRS) {
xAttrs = Maps.newHashMap();
for (int i = 0; i < xAttrsSize; ++i) {
final String name = WritableUtils.readString(in);
final int valueLen = in.readInt();
byte[] value = null;
if (valueLen > -1) {
value = new byte[valueLen];
if (valueLen > 0) {
in.readFully(value);
}
}
xAttrs.put(name, value);
}
} else {
xAttrs = null;
}
}
@Override
public boolean equals(Object o) {
if (!super.equals(o)) {
return false;
}
if (getClass() != o.getClass()) {
return false;
}
CopyListingFileStatus other = (CopyListingFileStatus)o;
return Objects.equal(aclEntries, other.aclEntries) &&
Objects.equal(xAttrs, other.xAttrs);
}
@Override
public int hashCode() {
return Objects.hashCode(super.hashCode(), aclEntries, xAttrs);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder(super.toString());
sb.append('{');
sb.append("aclEntries = " + aclEntries);
sb.append(", xAttrs = " + xAttrs);
sb.append('}');
return sb.toString();
}
}
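/**
 * Hypothetical usage sketch, not part of the Hadoop sources: round-trips a
 * CopyListingFileStatus through its Writable form, which is how entries are
 * stored in the copy-listing SequenceFile. The buffer classes are fully
 * qualified because this file does not import them; the helper itself is an
 * illustrative assumption.
 */
class CopyListingFileStatusSerializationSketch {
  static CopyListingFileStatus roundTrip(FileStatus source) throws IOException {
    CopyListingFileStatus status = new CopyListingFileStatus(source);
    org.apache.hadoop.io.DataOutputBuffer out =
        new org.apache.hadoop.io.DataOutputBuffer();
    status.write(out);
    org.apache.hadoop.io.DataInputBuffer in =
        new org.apache.hadoop.io.DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    CopyListingFileStatus copy = new CopyListingFileStatus();
    copy.readFields(in);
    // copy.equals(status) should hold: optional ACLs and xAttrs survive the trip.
    return copy;
  }
}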
| 6,640 | 29.324201 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.tools.util.DistCpUtils;
import org.apache.hadoop.security.Credentials;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.net.URI;
import java.util.Set;
import com.google.common.collect.Sets;
/**
* The CopyListing abstraction is responsible for how the list of
* sources and targets is constructed, for DistCp's copy function.
* The copy-listing should be a
* SequenceFile<Text, CopyListingFileStatus>, located at the path
* specified to buildListing(), each entry being a pair of (Source relative
* path, source file status), all the paths being fully qualified.
*/
public abstract class CopyListing extends Configured {
private Credentials credentials;
/**
* Build listing function creates the input listing that distcp uses to
* perform the copy.
*
* The build listing is a sequence file that has the relative path of a file as the key
* and the file status information of the source file as the value
*
* For instance if the source path is /tmp/data and the traversed path is
* /tmp/data/dir1/dir2/file1, then the sequence file would contain
*
* key: /dir1/dir2/file1 and value: FileStatus(/tmp/data/dir1/dir2/file1)
*
* The file would also contain directory entries. That is, if /tmp/data/dir1/dir2/file1
* is the only file under /tmp/data, the resulting sequence file would contain the
* following entries
*
* key: /dir1 and value: FileStatus(/tmp/data/dir1)
* key: /dir1/dir2 and value: FileStatus(/tmp/data/dir1/dir2)
* key: /dir1/dir2/file1 and value: FileStatus(/tmp/data/dir1/dir2/file1)
*
* Cases requiring special handling:
* If source path is a file (/tmp/file1), contents of the file will be as follows
*
* TARGET DOES NOT EXIST: Key-"", Value-FileStatus(/tmp/file1)
* TARGET IS FILE : Key-"", Value-FileStatus(/tmp/file1)
* TARGET IS DIR : Key-"/file1", Value-FileStatus(/tmp/file1)
*
* @param pathToListFile - Output file where the listing would be stored
* @param options - Input options to distcp
* @throws IOException - Exception if any
*/
public final void buildListing(Path pathToListFile,
DistCpOptions options) throws IOException {
validatePaths(options);
doBuildListing(pathToListFile, options);
Configuration config = getConf();
config.set(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, pathToListFile.toString());
config.setLong(DistCpConstants.CONF_LABEL_TOTAL_BYTES_TO_BE_COPIED, getBytesToCopy());
config.setLong(DistCpConstants.CONF_LABEL_TOTAL_NUMBER_OF_RECORDS, getNumberOfPaths());
validateFinalListing(pathToListFile, options);
}
/**
* Validate input and output paths
*
* @param options - Input options
* @throws InvalidInputException If inputs are invalid
* @throws IOException any Exception with FS
*/
protected abstract void validatePaths(DistCpOptions options)
throws IOException, InvalidInputException;
/**
* The interface to be implemented by sub-classes, to create the source/target file listing.
* @param pathToListFile Path on HDFS where the listing file is written.
* @param options Input Options for DistCp (indicating source/target paths.)
* @throws IOException Thrown on failure to create the listing file.
*/
protected abstract void doBuildListing(Path pathToListFile,
DistCpOptions options) throws IOException;
/**
* Return the total bytes that distCp should copy for the source paths
* This doesn't consider whether a file is identical on the target and would be skipped during copy
*
* @return total bytes to copy
*/
protected abstract long getBytesToCopy();
/**
   * Return the total number of paths to copy, including directories.
   * This does not consider whether a file or directory is already present at the
   * target and would be skipped during the copy.
*
* @return Total number of paths to distcp
*/
protected abstract long getNumberOfPaths();
/**
* Validate the final resulting path listing. Checks if there are duplicate
* entries. If preserving ACLs, checks that file system can support ACLs.
* If preserving XAttrs, checks that file system can support XAttrs.
*
   * @param pathToListFile - path listing built by doBuildListing
   * @param options - Input options to distcp
   * @throws IOException - On any failure while reading or validating the listing
   * @throws DuplicateFileException - if there are duplicate entries
*/
private void validateFinalListing(Path pathToListFile, DistCpOptions options)
throws DuplicateFileException, IOException {
Configuration config = getConf();
FileSystem fs = pathToListFile.getFileSystem(config);
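    // Sort the listing by source-relative path so that any duplicate entries
    // become adjacent and can be caught in a single linear pass below.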
Path sortedList = DistCpUtils.sortListing(fs, config, pathToListFile);
SequenceFile.Reader reader = new SequenceFile.Reader(
config, SequenceFile.Reader.file(sortedList));
try {
      Text lastKey = new Text("*"); // a source-relative path can never be "*"
CopyListingFileStatus lastFileStatus = new CopyListingFileStatus();
Text currentKey = new Text();
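      // Each distinct source FileSystem is checked at most once for ACL/XAttr
      // support; the URIs of already-checked file systems are cached in these sets.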
Set<URI> aclSupportCheckFsSet = Sets.newHashSet();
Set<URI> xAttrSupportCheckFsSet = Sets.newHashSet();
while (reader.next(currentKey)) {
if (currentKey.equals(lastKey)) {
CopyListingFileStatus currentFileStatus = new CopyListingFileStatus();
reader.getCurrentValue(currentFileStatus);
throw new DuplicateFileException("File " + lastFileStatus.getPath() + " and " +
currentFileStatus.getPath() + " would cause duplicates. Aborting");
}
reader.getCurrentValue(lastFileStatus);
if (options.shouldPreserve(DistCpOptions.FileAttribute.ACL)) {
FileSystem lastFs = lastFileStatus.getPath().getFileSystem(config);
URI lastFsUri = lastFs.getUri();
if (!aclSupportCheckFsSet.contains(lastFsUri)) {
DistCpUtils.checkFileSystemAclSupport(lastFs);
aclSupportCheckFsSet.add(lastFsUri);
}
}
if (options.shouldPreserve(DistCpOptions.FileAttribute.XATTR)) {
FileSystem lastFs = lastFileStatus.getPath().getFileSystem(config);
URI lastFsUri = lastFs.getUri();
if (!xAttrSupportCheckFsSet.contains(lastFsUri)) {
DistCpUtils.checkFileSystemXAttrSupport(lastFs);
xAttrSupportCheckFsSet.add(lastFsUri);
}
}
lastKey.set(currentKey);
}
} finally {
IOUtils.closeStream(reader);
}
}
/**
* Protected constructor, to initialize configuration.
* @param configuration The input configuration,
* with which the source/target FileSystems may be accessed.
   * @param credentials - Credentials object on which the FS delegation tokens are cached. If null,
   *                      delegation token caching is skipped.
*/
protected CopyListing(Configuration configuration, Credentials credentials) {
setConf(configuration);
setCredentials(credentials);
}
/**
   * Set the Credentials store on which FS delegation tokens will be cached.
* @param credentials - Credentials object
*/
protected void setCredentials(Credentials credentials) {
this.credentials = credentials;
}
/**
   * Get the Credentials store in which delegation tokens for accessed FS objects are cached.
* @return Credentials object
*/
protected Credentials getCredentials() {
return credentials;
}
/**
* Public Factory method with which the appropriate CopyListing implementation may be retrieved.
* @param configuration The input configuration.
* @param credentials Credentials object on which the FS delegation tokens are cached
* @param options The input Options, to help choose the appropriate CopyListing Implementation.
* @return An instance of the appropriate CopyListing implementation.
   * @throws java.io.IOException - If the chosen CopyListing implementation cannot be instantiated.
*/
public static CopyListing getCopyListing(Configuration configuration,
Credentials credentials,
DistCpOptions options)
throws IOException {
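    // When -diff is specified, always use the default GlobbedCopyListing and
    // ignore any custom listing class configured below.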
if (options.shouldUseDiff()) {
return new GlobbedCopyListing(configuration, credentials);
}
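    // Otherwise pick an implementation: an explicitly configured class wins;
    // failing that, use GlobbedCopyListing for source paths given on the
    // command line, or FileBasedCopyListing when a -f source-file listing
    // was supplied.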
String copyListingClassName = configuration.get(DistCpConstants.
CONF_LABEL_COPY_LISTING_CLASS, "");
Class<? extends CopyListing> copyListingClass;
try {
      if (!copyListingClassName.isEmpty()) {
copyListingClass = configuration.getClass(DistCpConstants.
CONF_LABEL_COPY_LISTING_CLASS, GlobbedCopyListing.class,
CopyListing.class);
} else {
if (options.getSourceFileListing() == null) {
copyListingClass = GlobbedCopyListing.class;
} else {
copyListingClass = FileBasedCopyListing.class;
}
}
copyListingClassName = copyListingClass.getName();
Constructor<? extends CopyListing> constructor = copyListingClass.
getDeclaredConstructor(Configuration.class, Credentials.class);
return constructor.newInstance(configuration, credentials);
} catch (Exception e) {
throw new IOException("Unable to instantiate " + copyListingClassName, e);
}
}
static class DuplicateFileException extends RuntimeException {
public DuplicateFileException(String message) {
super(message);
}
}
static class InvalidInputException extends RuntimeException {
public InvalidInputException(String message) {
super(message);
}
}
public static class AclsNotSupportedException extends RuntimeException {
public AclsNotSupportedException(String message) {
super(message);
}
}
public static class XAttrsNotSupportedException extends RuntimeException {
public XAttrsNotSupportedException(String message) {
super(message);
}
}
}
| 11,052 | 38.758993 | 98 |
java
|