repo
stringlengths 1
191
⌀ | file
stringlengths 23
351
| code
stringlengths 0
5.32M
| file_length
int64 0
5.32M
| avg_line_length
float64 0
2.9k
| max_line_length
int64 0
288k
| extension_type
stringclasses 1
value |
---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryMapOutput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import java.io.InputStream;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.BoundedByteArrayOutputStream;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.mapred.IFileInputStream;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.TaskAttemptID;
@InterfaceAudience.Private
@InterfaceStability.Unstable
class InMemoryMapOutput<K, V> extends IFileWrappedMapOutput<K, V> {
private static final Log LOG = LogFactory.getLog(InMemoryMapOutput.class);
private final byte[] memory;
private BoundedByteArrayOutputStream byteStream;
// Decompression of map-outputs
private final CompressionCodec codec;
private final Decompressor decompressor;
public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId,
MergeManagerImpl<K, V> merger,
int size, CompressionCodec codec,
boolean primaryMapOutput) {
super(conf, merger, mapId, (long)size, primaryMapOutput);
this.codec = codec;
byteStream = new BoundedByteArrayOutputStream(size);
memory = byteStream.getBuffer();
if (codec != null) {
decompressor = CodecPool.getDecompressor(codec);
} else {
decompressor = null;
}
}
public byte[] getMemory() {
return memory;
}
public BoundedByteArrayOutputStream getArrayStream() {
return byteStream;
}
@Override
protected void doShuffle(MapHost host, IFileInputStream iFin,
long compressedLength, long decompressedLength,
ShuffleClientMetrics metrics,
Reporter reporter) throws IOException {
InputStream input = iFin;
// Are map-outputs compressed?
if (codec != null) {
decompressor.reset();
input = codec.createInputStream(input, decompressor);
}
try {
IOUtils.readFully(input, memory, 0, memory.length);
metrics.inputBytes(memory.length);
reporter.progress();
LOG.info("Read " + memory.length + " bytes from map-output for " +
getMapId());
/**
* We've gotten the amount of data we were expecting. Verify the
* decompressor has nothing more to offer. This action also forces the
* decompressor to read any trailing bytes that weren't critical
* for decompression, which is necessary to keep the stream
* in sync.
*/
if (input.read() >= 0 ) {
throw new IOException("Unexpected extra bytes from input stream for " +
getMapId());
}
} finally {
CodecPool.returnDecompressor(decompressor);
}
}
@Override
public void commit() throws IOException {
getMerger().closeInMemoryFile(this);
}
@Override
public void abort() {
getMerger().unreserve(memory.length);
}
@Override
public String getDescription() {
return "MEMORY";
}
}
| 4,166 | 31.811024 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapred.IFileInputStream;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.MapOutputFile;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.CryptoUtils;
import org.apache.hadoop.mapreduce.task.reduce.MergeManagerImpl.CompressAwarePath;
import com.google.common.annotations.VisibleForTesting;
@InterfaceAudience.Private
@InterfaceStability.Unstable
class OnDiskMapOutput<K, V> extends IFileWrappedMapOutput<K, V> {
private static final Log LOG = LogFactory.getLog(OnDiskMapOutput.class);
private final FileSystem fs;
private final Path tmpOutputPath;
private final Path outputPath;
private final OutputStream disk;
private long compressedSize;
@Deprecated
public OnDiskMapOutput(TaskAttemptID mapId, TaskAttemptID reduceId,
MergeManagerImpl<K,V> merger, long size,
JobConf conf,
MapOutputFile mapOutputFile,
int fetcher, boolean primaryMapOutput)
throws IOException {
this(mapId, merger, size, conf, fetcher,
primaryMapOutput, FileSystem.getLocal(conf).getRaw(),
mapOutputFile.getInputFileForWrite(mapId.getTaskID(), size));
}
@Deprecated
OnDiskMapOutput(TaskAttemptID mapId, TaskAttemptID reduceId,
MergeManagerImpl<K,V> merger, long size,
JobConf conf,
MapOutputFile mapOutputFile,
int fetcher, boolean primaryMapOutput,
FileSystem fs, Path outputPath) throws IOException {
this(mapId, merger, size, conf, fetcher, primaryMapOutput, fs, outputPath);
}
OnDiskMapOutput(TaskAttemptID mapId,
MergeManagerImpl<K, V> merger, long size,
JobConf conf,
int fetcher, boolean primaryMapOutput,
FileSystem fs, Path outputPath) throws IOException {
super(conf, merger, mapId, size, primaryMapOutput);
this.fs = fs;
this.outputPath = outputPath;
tmpOutputPath = getTempPath(outputPath, fetcher);
disk = CryptoUtils.wrapIfNecessary(conf, fs.create(tmpOutputPath));
}
@VisibleForTesting
static Path getTempPath(Path outPath, int fetcher) {
return outPath.suffix(String.valueOf(fetcher));
}
@Override
protected void doShuffle(MapHost host, IFileInputStream input,
long compressedLength, long decompressedLength,
ShuffleClientMetrics metrics,
Reporter reporter) throws IOException {
// Copy data to local-disk
long bytesLeft = compressedLength;
try {
final int BYTES_TO_READ = 64 * 1024;
byte[] buf = new byte[BYTES_TO_READ];
while (bytesLeft > 0) {
int n = input.readWithChecksum(buf, 0,
(int) Math.min(bytesLeft, BYTES_TO_READ));
if (n < 0) {
throw new IOException("read past end of stream reading " +
getMapId());
}
disk.write(buf, 0, n);
bytesLeft -= n;
metrics.inputBytes(n);
reporter.progress();
}
LOG.info("Read " + (compressedLength - bytesLeft) +
" bytes from map-output for " + getMapId());
disk.close();
} catch (IOException ioe) {
// Close the streams
IOUtils.cleanup(LOG, disk);
// Re-throw
throw ioe;
}
// Sanity check
if (bytesLeft != 0) {
throw new IOException("Incomplete map output received for " +
getMapId() + " from " +
host.getHostName() + " (" +
bytesLeft + " bytes missing of " +
compressedLength + ")");
}
this.compressedSize = compressedLength;
}
@Override
public void commit() throws IOException {
fs.rename(tmpOutputPath, outputPath);
CompressAwarePath compressAwarePath = new CompressAwarePath(outputPath,
getSize(), this.compressedSize);
getMerger().closeOnDiskFile(compressAwarePath);
}
@Override
public void abort() {
try {
fs.delete(tmpOutputPath, false);
} catch (IOException ie) {
LOG.info("failure to clean up " + tmpOutputPath, ie);
}
}
@Override
public String getDescription() {
return "DISK";
}
}
| 5,690 | 33.91411 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.mapred.IFile.Reader;
import org.apache.hadoop.mapreduce.TaskAttemptID;
/**
* <code>IFile.InMemoryReader</code> to read map-outputs present in-memory.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class InMemoryReader<K, V> extends Reader<K, V> {
private final TaskAttemptID taskAttemptId;
private final MergeManagerImpl<K,V> merger;
private final DataInputBuffer memDataIn = new DataInputBuffer();
private final int start;
private final int length;
public InMemoryReader(MergeManagerImpl<K,V> merger, TaskAttemptID taskAttemptId,
byte[] data, int start, int length, Configuration conf)
throws IOException {
super(conf, null, length - start, null, null);
this.merger = merger;
this.taskAttemptId = taskAttemptId;
buffer = data;
bufferSize = (int)fileLength;
memDataIn.reset(buffer, start, length - start);
this.start = start;
this.length = length;
}
@Override
public void reset(int offset) {
memDataIn.reset(buffer, start + offset, length - start - offset);
bytesRead = offset;
eof = false;
}
@Override
public long getPosition() throws IOException {
// InMemoryReader does not initialize streams like Reader, so in.getPos()
// would not work. Instead, return the number of uncompressed bytes read,
// which will be correct since in-memory data is not compressed.
return bytesRead;
}
@Override
public long getLength() {
return fileLength;
}
private void dumpOnError() {
File dumpFile = new File("../output/" + taskAttemptId + ".dump");
System.err.println("Dumping corrupt map-output of " + taskAttemptId +
" to " + dumpFile.getAbsolutePath());
try (FileOutputStream fos = new FileOutputStream(dumpFile)) {
fos.write(buffer, 0, bufferSize);
} catch (IOException ioe) {
System.err.println("Failed to dump map-output of " + taskAttemptId);
}
}
public boolean nextRawKey(DataInputBuffer key) throws IOException {
try {
if (!positionToNextRecord(memDataIn)) {
return false;
}
// Setup the key
int pos = memDataIn.getPosition();
byte[] data = memDataIn.getData();
key.reset(data, pos, currentKeyLength);
// Position for the next value
long skipped = memDataIn.skip(currentKeyLength);
if (skipped != currentKeyLength) {
throw new IOException("Rec# " + recNo +
": Failed to skip past key of length: " +
currentKeyLength);
}
// Record the byte
bytesRead += currentKeyLength;
return true;
} catch (IOException ioe) {
dumpOnError();
throw ioe;
}
}
public void nextRawValue(DataInputBuffer value) throws IOException {
try {
int pos = memDataIn.getPosition();
byte[] data = memDataIn.getData();
value.reset(data, pos, currentValueLength);
// Position for the next record
long skipped = memDataIn.skip(currentValueLength);
if (skipped != currentValueLength) {
throw new IOException("Rec# " + recNo +
": Failed to skip past value of length: " +
currentValueLength);
}
// Record the byte
bytesRead += currentValueLength;
++recNo;
} catch (IOException ioe) {
dumpOnError();
throw ioe;
}
}
public void close() {
// Release
dataIn = null;
buffer = null;
// Inform the MergeManager
if (merger != null) {
merger.unreserve(bufferSize);
}
}
}
| 4,731 | 30.972973 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.MapTaskCompletionEventsUpdate;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
import org.apache.hadoop.mapreduce.TaskAttemptID;
class EventFetcher<K,V> extends Thread {
private static final long SLEEP_TIME = 1000;
private static final int MAX_RETRIES = 10;
private static final int RETRY_PERIOD = 5000;
private static final Log LOG = LogFactory.getLog(EventFetcher.class);
private final TaskAttemptID reduce;
private final TaskUmbilicalProtocol umbilical;
private final ShuffleScheduler<K,V> scheduler;
private int fromEventIdx = 0;
private final int maxEventsToFetch;
private final ExceptionReporter exceptionReporter;
private volatile boolean stopped = false;
public EventFetcher(TaskAttemptID reduce,
TaskUmbilicalProtocol umbilical,
ShuffleScheduler<K,V> scheduler,
ExceptionReporter reporter,
int maxEventsToFetch) {
setName("EventFetcher for fetching Map Completion Events");
setDaemon(true);
this.reduce = reduce;
this.umbilical = umbilical;
this.scheduler = scheduler;
exceptionReporter = reporter;
this.maxEventsToFetch = maxEventsToFetch;
}
@Override
public void run() {
int failures = 0;
LOG.info(reduce + " Thread started: " + getName());
try {
while (!stopped && !Thread.currentThread().isInterrupted()) {
try {
int numNewMaps = getMapCompletionEvents();
failures = 0;
if (numNewMaps > 0) {
LOG.info(reduce + ": " + "Got " + numNewMaps + " new map-outputs");
}
LOG.debug("GetMapEventsThread about to sleep for " + SLEEP_TIME);
if (!Thread.currentThread().isInterrupted()) {
Thread.sleep(SLEEP_TIME);
}
} catch (InterruptedException e) {
LOG.info("EventFetcher is interrupted.. Returning");
return;
} catch (IOException ie) {
LOG.info("Exception in getting events", ie);
// check to see whether to abort
if (++failures >= MAX_RETRIES) {
throw new IOException("too many failures downloading events", ie);
}
// sleep for a bit
if (!Thread.currentThread().isInterrupted()) {
Thread.sleep(RETRY_PERIOD);
}
}
}
} catch (InterruptedException e) {
return;
} catch (Throwable t) {
exceptionReporter.reportException(t);
return;
}
}
public void shutDown() {
this.stopped = true;
interrupt();
try {
join(5000);
} catch(InterruptedException ie) {
LOG.warn("Got interrupted while joining " + getName(), ie);
}
}
/**
* Queries the {@link TaskTracker} for a set of map-completion events
* from a given event ID.
* @throws IOException
*/
protected int getMapCompletionEvents()
throws IOException, InterruptedException {
int numNewMaps = 0;
TaskCompletionEvent events[] = null;
do {
MapTaskCompletionEventsUpdate update =
umbilical.getMapCompletionEvents(
(org.apache.hadoop.mapred.JobID)reduce.getJobID(),
fromEventIdx,
maxEventsToFetch,
(org.apache.hadoop.mapred.TaskAttemptID)reduce);
events = update.getMapTaskCompletionEvents();
LOG.debug("Got " + events.length + " map completion events from " +
fromEventIdx);
assert !update.shouldReset() : "Unexpected legacy state";
// Update the last seen event ID
fromEventIdx += events.length;
// Process the TaskCompletionEvents:
// 1. Save the SUCCEEDED maps in knownOutputs to fetch the outputs.
// 2. Save the OBSOLETE/FAILED/KILLED maps in obsoleteOutputs to stop
// fetching from those maps.
// 3. Remove TIPFAILED maps from neededOutputs since we don't need their
// outputs at all.
for (TaskCompletionEvent event : events) {
scheduler.resolve(event);
if (TaskCompletionEvent.Status.SUCCEEDED == event.getTaskStatus()) {
++numNewMaps;
}
}
} while (events.length == maxEventsToFetch);
return numNewMaps;
}
}
| 5,282 | 33.529412 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleScheduler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.TaskCompletionEvent;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public interface ShuffleScheduler<K,V> {
/**
* Wait until the shuffle finishes or until the timeout.
* @param millis maximum wait time
* @return true if the shuffle is done
* @throws InterruptedException
*/
public boolean waitUntilDone(int millis) throws InterruptedException;
/**
* Interpret a {@link TaskCompletionEvent} from the event stream.
* @param tce Intermediate output metadata
*/
public void resolve(TaskCompletionEvent tce)
throws IOException, InterruptedException;
public void close() throws InterruptedException;
}
| 1,686 | 34.145833 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import java.io.IOException;
import java.net.InetAddress;
import java.net.URI;
import java.net.UnknownHostException;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapred.TaskStatus;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.task.reduce.MapHost.State;
import org.apache.hadoop.util.Progress;
import org.apache.hadoop.util.Time;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class ShuffleSchedulerImpl<K,V> implements ShuffleScheduler<K,V> {
private static final ThreadLocal<Long> SHUFFLE_START =
new ThreadLocal<Long>() {
protected Long initialValue() {
return 0L;
}
};
private static final Log LOG = LogFactory.getLog(ShuffleSchedulerImpl.class);
private static final int MAX_MAPS_AT_ONCE = 20;
private static final long INITIAL_PENALTY = 10000;
private static final float PENALTY_GROWTH_RATE = 1.3f;
private final static int REPORT_FAILURE_LIMIT = 10;
private static final float BYTES_PER_MILLIS_TO_MBS = 1000f / 1024 / 1024;
private final boolean[] finishedMaps;
private final int totalMaps;
private int remainingMaps;
private Map<String, MapHost> mapLocations = new HashMap<String, MapHost>();
private Set<MapHost> pendingHosts = new HashSet<MapHost>();
private Set<TaskAttemptID> obsoleteMaps = new HashSet<TaskAttemptID>();
private final TaskAttemptID reduceId;
private final Random random = new Random();
private final DelayQueue<Penalty> penalties = new DelayQueue<Penalty>();
private final Referee referee = new Referee();
private final Map<TaskAttemptID,IntWritable> failureCounts =
new HashMap<TaskAttemptID,IntWritable>();
private final Map<String,IntWritable> hostFailures =
new HashMap<String,IntWritable>();
private final TaskStatus status;
private final ExceptionReporter reporter;
private final int abortFailureLimit;
private final Progress progress;
private final Counters.Counter shuffledMapsCounter;
private final Counters.Counter reduceShuffleBytes;
private final Counters.Counter failedShuffleCounter;
private final long startTime;
private long lastProgressTime;
private final CopyTimeTracker copyTimeTracker;
private volatile int maxMapRuntime = 0;
private final int maxFailedUniqueFetches;
private final int maxFetchFailuresBeforeReporting;
private long totalBytesShuffledTillNow = 0;
private final DecimalFormat mbpsFormat = new DecimalFormat("0.00");
private final boolean reportReadErrorImmediately;
private long maxDelay = MRJobConfig.DEFAULT_MAX_SHUFFLE_FETCH_RETRY_DELAY;
private int maxHostFailures;
public ShuffleSchedulerImpl(JobConf job, TaskStatus status,
TaskAttemptID reduceId,
ExceptionReporter reporter,
Progress progress,
Counters.Counter shuffledMapsCounter,
Counters.Counter reduceShuffleBytes,
Counters.Counter failedShuffleCounter) {
totalMaps = job.getNumMapTasks();
abortFailureLimit = Math.max(30, totalMaps / 10);
copyTimeTracker = new CopyTimeTracker();
remainingMaps = totalMaps;
finishedMaps = new boolean[remainingMaps];
this.reporter = reporter;
this.status = status;
this.reduceId = reduceId;
this.progress = progress;
this.shuffledMapsCounter = shuffledMapsCounter;
this.reduceShuffleBytes = reduceShuffleBytes;
this.failedShuffleCounter = failedShuffleCounter;
this.startTime = Time.monotonicNow();
lastProgressTime = startTime;
referee.start();
this.maxFailedUniqueFetches = Math.min(totalMaps, 5);
this.maxFetchFailuresBeforeReporting = job.getInt(
MRJobConfig.SHUFFLE_FETCH_FAILURES, REPORT_FAILURE_LIMIT);
this.reportReadErrorImmediately = job.getBoolean(
MRJobConfig.SHUFFLE_NOTIFY_READERROR, true);
this.maxDelay = job.getLong(MRJobConfig.MAX_SHUFFLE_FETCH_RETRY_DELAY,
MRJobConfig.DEFAULT_MAX_SHUFFLE_FETCH_RETRY_DELAY);
this.maxHostFailures = job.getInt(
MRJobConfig.MAX_SHUFFLE_FETCH_HOST_FAILURES,
MRJobConfig.DEFAULT_MAX_SHUFFLE_FETCH_HOST_FAILURES);
}
@Override
public void resolve(TaskCompletionEvent event) {
switch (event.getTaskStatus()) {
case SUCCEEDED:
URI u = getBaseURI(reduceId, event.getTaskTrackerHttp());
addKnownMapOutput(u.getHost() + ":" + u.getPort(),
u.toString(),
event.getTaskAttemptId());
maxMapRuntime = Math.max(maxMapRuntime, event.getTaskRunTime());
break;
case FAILED:
case KILLED:
case OBSOLETE:
obsoleteMapOutput(event.getTaskAttemptId());
LOG.info("Ignoring obsolete output of " + event.getTaskStatus() +
" map-task: '" + event.getTaskAttemptId() + "'");
break;
case TIPFAILED:
tipFailed(event.getTaskAttemptId().getTaskID());
LOG.info("Ignoring output of failed map TIP: '" +
event.getTaskAttemptId() + "'");
break;
}
}
static URI getBaseURI(TaskAttemptID reduceId, String url) {
StringBuffer baseUrl = new StringBuffer(url);
if (!url.endsWith("/")) {
baseUrl.append("/");
}
baseUrl.append("mapOutput?job=");
baseUrl.append(reduceId.getJobID());
baseUrl.append("&reduce=");
baseUrl.append(reduceId.getTaskID().getId());
baseUrl.append("&map=");
URI u = URI.create(baseUrl.toString());
return u;
}
public synchronized void copySucceeded(TaskAttemptID mapId,
MapHost host,
long bytes,
long startMillis,
long endMillis,
MapOutput<K,V> output
) throws IOException {
failureCounts.remove(mapId);
hostFailures.remove(host.getHostName());
int mapIndex = mapId.getTaskID().getId();
if (!finishedMaps[mapIndex]) {
output.commit();
finishedMaps[mapIndex] = true;
shuffledMapsCounter.increment(1);
if (--remainingMaps == 0) {
notifyAll();
}
// update single copy task status
long copyMillis = (endMillis - startMillis);
if (copyMillis == 0) copyMillis = 1;
float bytesPerMillis = (float) bytes / copyMillis;
float transferRate = bytesPerMillis * BYTES_PER_MILLIS_TO_MBS;
String individualProgress = "copy task(" + mapId + " succeeded"
+ " at " + mbpsFormat.format(transferRate) + " MB/s)";
// update the aggregated status
copyTimeTracker.add(startMillis, endMillis);
totalBytesShuffledTillNow += bytes;
updateStatus(individualProgress);
reduceShuffleBytes.increment(bytes);
lastProgressTime = Time.monotonicNow();
LOG.debug("map " + mapId + " done " + status.getStateString());
}
}
private synchronized void updateStatus(String individualProgress) {
int mapsDone = totalMaps - remainingMaps;
long totalCopyMillis = copyTimeTracker.getCopyMillis();
if (totalCopyMillis == 0) totalCopyMillis = 1;
float bytesPerMillis = (float) totalBytesShuffledTillNow / totalCopyMillis;
float transferRate = bytesPerMillis * BYTES_PER_MILLIS_TO_MBS;
progress.set((float) mapsDone / totalMaps);
String statusString = mapsDone + " / " + totalMaps + " copied.";
status.setStateString(statusString);
if (individualProgress != null) {
progress.setStatus(individualProgress + " Aggregated copy rate(" +
mapsDone + " of " + totalMaps + " at " +
mbpsFormat.format(transferRate) + " MB/s)");
} else {
progress.setStatus("copy(" + mapsDone + " of " + totalMaps + " at "
+ mbpsFormat.format(transferRate) + " MB/s)");
}
}
private void updateStatus() {
updateStatus(null);
}
public synchronized void hostFailed(String hostname) {
if (hostFailures.containsKey(hostname)) {
IntWritable x = hostFailures.get(hostname);
x.set(x.get() + 1);
} else {
hostFailures.put(hostname, new IntWritable(1));
}
}
public synchronized void copyFailed(TaskAttemptID mapId, MapHost host,
boolean readError, boolean connectExcpt) {
host.penalize();
int failures = 1;
if (failureCounts.containsKey(mapId)) {
IntWritable x = failureCounts.get(mapId);
x.set(x.get() + 1);
failures = x.get();
} else {
failureCounts.put(mapId, new IntWritable(1));
}
String hostname = host.getHostName();
IntWritable hostFailedNum = hostFailures.get(hostname);
// MAPREDUCE-6361: hostname could get cleanup from hostFailures in another
// thread with copySucceeded.
// In this case, add back hostname to hostFailures to get rid of NPE issue.
if (hostFailedNum == null) {
hostFailures.put(hostname, new IntWritable(1));
}
//report failure if already retried maxHostFailures times
boolean hostFail = hostFailures.get(hostname).get() >
getMaxHostFailures() ? true : false;
if (failures >= abortFailureLimit) {
try {
throw new IOException(failures + " failures downloading " + mapId);
} catch (IOException ie) {
reporter.reportException(ie);
}
}
checkAndInformMRAppMaster(failures, mapId, readError, connectExcpt,
hostFail);
checkReducerHealth();
long delay = (long) (INITIAL_PENALTY *
Math.pow(PENALTY_GROWTH_RATE, failures));
if (delay > maxDelay) {
delay = maxDelay;
}
penalties.add(new Penalty(host, delay));
failedShuffleCounter.increment(1);
}
public void reportLocalError(IOException ioe) {
try {
LOG.error("Shuffle failed : local error on this node: "
+ InetAddress.getLocalHost());
} catch (UnknownHostException e) {
LOG.error("Shuffle failed : local error on this node");
}
reporter.reportException(ioe);
}
// Notify the MRAppMaster
// after every read error, if 'reportReadErrorImmediately' is true or
// after every 'maxFetchFailuresBeforeReporting' failures
private void checkAndInformMRAppMaster(
int failures, TaskAttemptID mapId, boolean readError,
boolean connectExcpt, boolean hostFailed) {
if (connectExcpt || (reportReadErrorImmediately && readError)
|| ((failures % maxFetchFailuresBeforeReporting) == 0) || hostFailed) {
LOG.info("Reporting fetch failure for " + mapId + " to MRAppMaster.");
status.addFetchFailedMap((org.apache.hadoop.mapred.TaskAttemptID) mapId);
}
}
private void checkReducerHealth() {
final float MAX_ALLOWED_FAILED_FETCH_ATTEMPT_PERCENT = 0.5f;
final float MIN_REQUIRED_PROGRESS_PERCENT = 0.5f;
final float MAX_ALLOWED_STALL_TIME_PERCENT = 0.5f;
long totalFailures = failedShuffleCounter.getValue();
int doneMaps = totalMaps - remainingMaps;
boolean reducerHealthy =
(((float)totalFailures / (totalFailures + doneMaps))
< MAX_ALLOWED_FAILED_FETCH_ATTEMPT_PERCENT);
// check if the reducer has progressed enough
boolean reducerProgressedEnough =
(((float)doneMaps / totalMaps)
>= MIN_REQUIRED_PROGRESS_PERCENT);
// check if the reducer is stalled for a long time
// duration for which the reducer is stalled
int stallDuration =
(int)(Time.monotonicNow() - lastProgressTime);
// duration for which the reducer ran with progress
int shuffleProgressDuration =
(int)(lastProgressTime - startTime);
// min time the reducer should run without getting killed
int minShuffleRunDuration =
Math.max(shuffleProgressDuration, maxMapRuntime);
boolean reducerStalled =
(((float)stallDuration / minShuffleRunDuration)
>= MAX_ALLOWED_STALL_TIME_PERCENT);
// kill if not healthy and has insufficient progress
if ((failureCounts.size() >= maxFailedUniqueFetches ||
failureCounts.size() == (totalMaps - doneMaps))
&& !reducerHealthy
&& (!reducerProgressedEnough || reducerStalled)) {
LOG.fatal("Shuffle failed with too many fetch failures " +
"and insufficient progress!");
String errorMsg = "Exceeded MAX_FAILED_UNIQUE_FETCHES; bailing-out.";
reporter.reportException(new IOException(errorMsg));
}
}
public synchronized void tipFailed(TaskID taskId) {
if (!finishedMaps[taskId.getId()]) {
finishedMaps[taskId.getId()] = true;
if (--remainingMaps == 0) {
notifyAll();
}
updateStatus();
}
}
public synchronized void addKnownMapOutput(String hostName,
String hostUrl,
TaskAttemptID mapId) {
MapHost host = mapLocations.get(hostName);
if (host == null) {
host = new MapHost(hostName, hostUrl);
mapLocations.put(hostName, host);
}
host.addKnownMap(mapId);
// Mark the host as pending
if (host.getState() == State.PENDING) {
pendingHosts.add(host);
notifyAll();
}
}
public synchronized void obsoleteMapOutput(TaskAttemptID mapId) {
obsoleteMaps.add(mapId);
}
public synchronized void putBackKnownMapOutput(MapHost host,
TaskAttemptID mapId) {
host.addKnownMap(mapId);
}
public synchronized MapHost getHost() throws InterruptedException {
while(pendingHosts.isEmpty()) {
wait();
}
MapHost host = null;
Iterator<MapHost> iter = pendingHosts.iterator();
int numToPick = random.nextInt(pendingHosts.size());
for (int i=0; i <= numToPick; ++i) {
host = iter.next();
}
pendingHosts.remove(host);
host.markBusy();
LOG.debug("Assigning " + host + " with " + host.getNumKnownMapOutputs() +
" to " + Thread.currentThread().getName());
SHUFFLE_START.set(Time.monotonicNow());
return host;
}
public synchronized List<TaskAttemptID> getMapsForHost(MapHost host) {
List<TaskAttemptID> list = host.getAndClearKnownMaps();
Iterator<TaskAttemptID> itr = list.iterator();
List<TaskAttemptID> result = new ArrayList<TaskAttemptID>();
int includedMaps = 0;
int totalSize = list.size();
// find the maps that we still need, up to the limit
while (itr.hasNext()) {
TaskAttemptID id = itr.next();
if (!obsoleteMaps.contains(id) && !finishedMaps[id.getTaskID().getId()]) {
result.add(id);
if (++includedMaps >= MAX_MAPS_AT_ONCE) {
break;
}
}
}
// put back the maps left after the limit
while (itr.hasNext()) {
TaskAttemptID id = itr.next();
if (!obsoleteMaps.contains(id) && !finishedMaps[id.getTaskID().getId()]) {
host.addKnownMap(id);
}
}
LOG.debug("assigned " + includedMaps + " of " + totalSize + " to " +
host + " to " + Thread.currentThread().getName());
return result;
}
public synchronized void freeHost(MapHost host) {
if (host.getState() != State.PENALIZED) {
if (host.markAvailable() == State.PENDING) {
pendingHosts.add(host);
notifyAll();
}
}
LOG.info(host + " freed by " + Thread.currentThread().getName() + " in " +
(Time.monotonicNow()-SHUFFLE_START.get()) + "ms");
}
public synchronized void resetKnownMaps() {
mapLocations.clear();
obsoleteMaps.clear();
pendingHosts.clear();
}
/**
* Wait until the shuffle finishes or until the timeout.
* @param millis maximum wait time
* @return true if the shuffle is done
* @throws InterruptedException
*/
@Override
public synchronized boolean waitUntilDone(int millis
) throws InterruptedException {
if (remainingMaps > 0) {
wait(millis);
return remainingMaps == 0;
}
return true;
}
/**
* A structure that records the penalty for a host.
*/
private static class Penalty implements Delayed {
MapHost host;
private long endTime;
Penalty(MapHost host, long delay) {
this.host = host;
this.endTime = Time.monotonicNow() + delay;
}
@Override
public long getDelay(TimeUnit unit) {
long remainingTime = endTime - Time.monotonicNow();
return unit.convert(remainingTime, TimeUnit.MILLISECONDS);
}
@Override
public int compareTo(Delayed o) {
long other = ((Penalty) o).endTime;
return endTime == other ? 0 : (endTime < other ? -1 : 1);
}
}
/**
* A thread that takes hosts off of the penalty list when the timer expires.
*/
private class Referee extends Thread {
public Referee() {
setName("ShufflePenaltyReferee");
setDaemon(true);
}
public void run() {
try {
while (true) {
// take the first host that has an expired penalty
MapHost host = penalties.take().host;
synchronized (ShuffleSchedulerImpl.this) {
if (host.markAvailable() == MapHost.State.PENDING) {
pendingHosts.add(host);
ShuffleSchedulerImpl.this.notifyAll();
}
}
}
} catch (InterruptedException ie) {
return;
} catch (Throwable t) {
reporter.reportException(t);
}
}
}
@Override
public void close() throws InterruptedException {
referee.interrupt();
referee.join();
}
public int getMaxHostFailures() {
return maxHostFailures;
}
private static class CopyTimeTracker {
List<Interval> intervals;
long copyMillis;
public CopyTimeTracker() {
intervals = Collections.emptyList();
copyMillis = 0;
}
public void add(long s, long e) {
Interval interval = new Interval(s, e);
copyMillis = getTotalCopyMillis(interval);
}
public long getCopyMillis() {
return copyMillis;
}
// This method captures the time during which any copy was in progress
// each copy time period is record in the Interval list
private long getTotalCopyMillis(Interval newInterval) {
if (newInterval == null) {
return copyMillis;
}
List<Interval> result = new ArrayList<Interval>(intervals.size() + 1);
for (Interval interval: intervals) {
if (interval.end < newInterval.start) {
result.add(interval);
} else if (interval.start > newInterval.end) {
result.add(newInterval);
newInterval = interval;
} else {
newInterval = new Interval(
Math.min(interval.start, newInterval.start),
Math.max(newInterval.end, interval.end));
}
}
result.add(newInterval);
intervals = result;
//compute total millis
long length = 0;
for (Interval interval : intervals) {
length += interval.getIntervalLength();
}
return length;
}
private static class Interval {
final long start;
final long end;
public Interval(long s, long e) {
start = s;
end = e;
}
public long getIntervalLength() {
return end - start;
}
}
}
}
| 20,904 | 32.881686 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapOutputFile;
import org.apache.hadoop.mapred.RawKeyValueIterator;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.Task.CombineOutputCollector;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.util.Progress;
import java.io.IOException;
/**
* An interface for a reduce side merge that works with the default Shuffle
* implementation.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public interface MergeManager<K, V> {
/**
* To wait until merge has some freed resources available so that it can
* accept shuffled data. This will be called before a network connection is
* established to get the map output.
*/
public void waitForResource() throws InterruptedException;
/**
* To reserve resources for data to be shuffled. This will be called after
* a network connection is made to shuffle the data.
* @param mapId mapper from which data will be shuffled.
* @param requestedSize size in bytes of data that will be shuffled.
* @param fetcher id of the map output fetcher that will shuffle the data.
* @return a MapOutput object that can be used by shuffle to shuffle data. If
* required resources cannot be reserved immediately, a null can be returned.
*/
public MapOutput<K, V> reserve(TaskAttemptID mapId, long requestedSize,
int fetcher) throws IOException;
/**
* Called at the end of shuffle.
* @return a key value iterator object.
*/
public RawKeyValueIterator close() throws Throwable;
}
| 2,865 | 39.366197 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/annotation/Checkpointable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.annotation;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Contract representing to the framework that the task can be safely preempted
* and restarted between invocations of the user-defined function.
*
* This is often true when the result of a function does not rely on state
* derived from previous elements in the record stream, but the guarantee is
* left as an exercise to the implementor.
*/
@Documented
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@InterfaceAudience.Public
@InterfaceStability.Evolving
public @interface Checkpointable { }
| 1,702 | 38.604651 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This package contains the implementations of different types of
* map-reduce counters.
*
* cf. MAPREDUCE-901 for rationales.
*/
@InterfaceStability.Evolving
package org.apache.hadoop.mapreduce.counters;
import org.apache.hadoop.classification.InterfaceStability;
| 1,082 | 36.344828 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.counters;
import static org.apache.hadoop.mapreduce.counters.CounterGroupFactory.getFrameworkGroupId;
import static org.apache.hadoop.mapreduce.counters.CounterGroupFactory.isFrameworkGroup;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentSkipListMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.FileSystemCounter;
import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.util.StringInterner;
import com.google.common.collect.Iterables;
import com.google.common.collect.Iterators;
import com.google.common.collect.Maps;
/**
* An abstract class to provide common implementation for the Counters
* container in both mapred and mapreduce packages.
*
* @param <C> type of counter inside the counters
* @param <G> type of group inside the counters
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class AbstractCounters<C extends Counter,
G extends CounterGroupBase<C>>
implements Writable, Iterable<G> {
protected static final Log LOG = LogFactory.getLog("mapreduce.Counters");
/**
* A cache from enum values to the associated counter.
*/
private final Map<Enum<?>, C> cache = Maps.newIdentityHashMap();
//framework & fs groups
private final Map<String, G> fgroups = new ConcurrentSkipListMap<String, G>();
// other groups
private final Map<String, G> groups = new ConcurrentSkipListMap<String, G>();
private final CounterGroupFactory<C, G> groupFactory;
// For framework counter serialization without strings
enum GroupType { FRAMEWORK, FILESYSTEM };
// Writes only framework and fs counters if false.
private boolean writeAllCounters = true;
private static final Map<String, String> legacyMap = Maps.newHashMap();
static {
legacyMap.put("org.apache.hadoop.mapred.Task$Counter",
TaskCounter.class.getName());
legacyMap.put("org.apache.hadoop.mapred.JobInProgress$Counter",
JobCounter.class.getName());
legacyMap.put("FileSystemCounters", FileSystemCounter.class.getName());
}
private final Limits limits = new Limits();
@InterfaceAudience.Private
public AbstractCounters(CounterGroupFactory<C, G> gf) {
groupFactory = gf;
}
/**
* Construct from another counters object.
* @param <C1> type of the other counter
* @param <G1> type of the other counter group
* @param counters the counters object to copy
* @param groupFactory the factory for new groups
*/
@InterfaceAudience.Private
public <C1 extends Counter, G1 extends CounterGroupBase<C1>>
AbstractCounters(AbstractCounters<C1, G1> counters,
CounterGroupFactory<C, G> groupFactory) {
this.groupFactory = groupFactory;
for(G1 group: counters) {
String name = group.getName();
G newGroup = groupFactory.newGroup(name, group.getDisplayName(), limits);
(isFrameworkGroup(name) ? fgroups : groups).put(name, newGroup);
for(Counter counter: group) {
newGroup.addCounter(counter.getName(), counter.getDisplayName(),
counter.getValue());
}
}
}
/** Add a group.
* @param group object to add
* @return the group
*/
@InterfaceAudience.Private
public synchronized G addGroup(G group) {
String name = group.getName();
if (isFrameworkGroup(name)) {
fgroups.put(name, group);
} else {
limits.checkGroups(groups.size() + 1);
groups.put(name, group);
}
return group;
}
/**
* Add a new group
* @param name of the group
* @param displayName of the group
* @return the group
*/
@InterfaceAudience.Private
public G addGroup(String name, String displayName) {
return addGroup(groupFactory.newGroup(name, displayName, limits));
}
/**
* Find a counter, create one if necessary
* @param groupName of the counter
* @param counterName name of the counter
* @return the matching counter
*/
public C findCounter(String groupName, String counterName) {
G grp = getGroup(groupName);
return grp.findCounter(counterName);
}
/**
* Find the counter for the given enum. The same enum will always return the
* same counter.
* @param key the counter key
* @return the matching counter object
*/
public synchronized C findCounter(Enum<?> key) {
C counter = cache.get(key);
if (counter == null) {
counter = findCounter(key.getDeclaringClass().getName(), key.name());
cache.put(key, counter);
}
return counter;
}
/**
* Find the file system counter for the given scheme and enum.
* @param scheme of the file system
* @param key the enum of the counter
* @return the file system counter
*/
@InterfaceAudience.Private
public synchronized C findCounter(String scheme, FileSystemCounter key) {
return ((FileSystemCounterGroup<C>) getGroup(
FileSystemCounter.class.getName()).getUnderlyingGroup()).
findCounter(scheme, key);
}
/**
* Returns the names of all counter classes.
* @return Set of counter names.
*/
public synchronized Iterable<String> getGroupNames() {
HashSet<String> deprecated = new HashSet<String>();
for(Map.Entry<String, String> entry : legacyMap.entrySet()) {
String newGroup = entry.getValue();
boolean isFGroup = isFrameworkGroup(newGroup);
if(isFGroup ? fgroups.containsKey(newGroup) : groups.containsKey(newGroup)) {
deprecated.add(entry.getKey());
}
}
return Iterables.concat(fgroups.keySet(), groups.keySet(), deprecated);
}
@Override
public Iterator<G> iterator() {
return Iterators.concat(fgroups.values().iterator(),
groups.values().iterator());
}
/**
* Returns the named counter group, or an empty group if there is none
* with the specified name.
* @param groupName name of the group
* @return the group
*/
public synchronized G getGroup(String groupName) {
// filterGroupName
boolean groupNameInLegacyMap = true;
String newGroupName = legacyMap.get(groupName);
if (newGroupName == null) {
groupNameInLegacyMap = false;
newGroupName = Limits.filterGroupName(groupName);
}
boolean isFGroup = isFrameworkGroup(newGroupName);
G group = isFGroup ? fgroups.get(newGroupName) : groups.get(newGroupName);
if (group == null) {
group = groupFactory.newGroup(newGroupName, limits);
if (isFGroup) {
fgroups.put(newGroupName, group);
} else {
limits.checkGroups(groups.size() + 1);
groups.put(newGroupName, group);
}
if (groupNameInLegacyMap) {
LOG.warn("Group " + groupName + " is deprecated. Use " + newGroupName
+ " instead");
}
}
return group;
}
/**
* Returns the total number of counters, by summing the number of counters
* in each group.
* @return the total number of counters
*/
public synchronized int countCounters() {
int result = 0;
for (G group : this) {
result += group.size();
}
return result;
}
/**
* Write the set of groups.
* Counters ::= version #fgroups (groupId, group)* #groups (group)*
*/
@Override
public synchronized void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, groupFactory.version());
WritableUtils.writeVInt(out, fgroups.size()); // framework groups first
for (G group : fgroups.values()) {
if (group.getUnderlyingGroup() instanceof FrameworkCounterGroup<?, ?>) {
WritableUtils.writeVInt(out, GroupType.FRAMEWORK.ordinal());
WritableUtils.writeVInt(out, getFrameworkGroupId(group.getName()));
group.write(out);
} else if (group.getUnderlyingGroup() instanceof FileSystemCounterGroup<?>) {
WritableUtils.writeVInt(out, GroupType.FILESYSTEM.ordinal());
group.write(out);
}
}
if (writeAllCounters) {
WritableUtils.writeVInt(out, groups.size());
for (G group : groups.values()) {
Text.writeString(out, group.getName());
group.write(out);
}
} else {
WritableUtils.writeVInt(out, 0);
}
}
@Override
public synchronized void readFields(DataInput in) throws IOException {
int version = WritableUtils.readVInt(in);
if (version != groupFactory.version()) {
throw new IOException("Counters version mismatch, expected "+
groupFactory.version() +" got "+ version);
}
int numFGroups = WritableUtils.readVInt(in);
fgroups.clear();
GroupType[] groupTypes = GroupType.values();
while (numFGroups-- > 0) {
GroupType groupType = groupTypes[WritableUtils.readVInt(in)];
G group;
switch (groupType) {
case FILESYSTEM: // with nothing
group = groupFactory.newFileSystemGroup();
break;
case FRAMEWORK: // with group id
group = groupFactory.newFrameworkGroup(WritableUtils.readVInt(in));
break;
default: // Silence dumb compiler, as it would've thrown earlier
throw new IOException("Unexpected counter group type: "+ groupType);
}
group.readFields(in);
fgroups.put(group.getName(), group);
}
int numGroups = WritableUtils.readVInt(in);
if (!groups.isEmpty()) {
groups.clear();
limits.reset();
}
while (numGroups-- > 0) {
limits.checkGroups(groups.size() + 1);
G group = groupFactory.newGenericGroup(
StringInterner.weakIntern(Text.readString(in)), null, limits);
group.readFields(in);
groups.put(group.getName(), group);
}
}
/**
* Return textual representation of the counter values.
* @return the string
*/
@Override
public synchronized String toString() {
StringBuilder sb = new StringBuilder("Counters: " + countCounters());
for (G group: this) {
sb.append("\n\t").append(group.getDisplayName());
for (Counter counter: group) {
sb.append("\n\t\t").append(counter.getDisplayName()).append("=")
.append(counter.getValue());
}
}
return sb.toString();
}
/**
* Increments multiple counters by their amounts in another Counters
* instance.
* @param other the other Counters instance
*/
public synchronized void incrAllCounters(AbstractCounters<C, G> other) {
for(G right : other) {
String groupName = right.getName();
G left = (isFrameworkGroup(groupName) ? fgroups : groups).get(groupName);
if (left == null) {
left = addGroup(groupName, right.getDisplayName());
}
left.incrAllCounters(right);
}
}
@Override
@SuppressWarnings("unchecked")
public boolean equals(Object genericRight) {
if (genericRight instanceof AbstractCounters<?, ?>) {
return Iterators.elementsEqual(iterator(),
((AbstractCounters<C, G>)genericRight).iterator());
}
return false;
}
@Override
public int hashCode() {
return groups.hashCode();
}
/**
* Set the "writeAllCounters" option to true or false
* @param send if true all counters would be serialized, otherwise only
* framework counters would be serialized in
* {@link #write(DataOutput)}
*/
@InterfaceAudience.Private
public void setWriteAllCounters(boolean send) {
writeAllCounters = send;
}
/**
* Get the "writeAllCounters" option
* @return true of all counters would serialized
*/
@InterfaceAudience.Private
public boolean getWriteAllCounters() {
return writeAllCounters;
}
@InterfaceAudience.Private
public Limits limits() {
return limits;
}
}
| 12,995 | 31.818182 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FrameworkCounterGroup.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.counters;
import static com.google.common.base.Preconditions.checkNotNull;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.util.ResourceBundles;
import com.google.common.collect.AbstractIterator;
import com.google.common.collect.Iterators;
/**
* An abstract class to provide common implementation for the framework
* counter group in both mapred and mapreduce packages.
*
* @param <T> type of the counter enum class
* @param <C> type of the counter
*/
@InterfaceAudience.Private
public abstract class FrameworkCounterGroup<T extends Enum<T>,
C extends Counter> implements CounterGroupBase<C> {
private static final Log LOG = LogFactory.getLog(FrameworkCounterGroup.class);
private final Class<T> enumClass; // for Enum.valueOf
private final Object[] counters; // local casts are OK and save a class ref
private String displayName = null;
/**
* A counter facade for framework counters.
* Use old (which extends new) interface to make compatibility easier.
*/
@InterfaceAudience.Private
public static class FrameworkCounter<T extends Enum<T>> extends AbstractCounter {
final T key;
final String groupName;
private long value;
public FrameworkCounter(T ref, String groupName) {
key = ref;
this.groupName = groupName;
}
@Private
public T getKey() {
return key;
}
@Private
public String getGroupName() {
return groupName;
}
@Override
public String getName() {
return key.name();
}
@Override
public String getDisplayName() {
return ResourceBundles.getCounterName(groupName, getName(), getName());
}
@Override
public long getValue() {
return value;
}
@Override
public void setValue(long value) {
this.value = value;
}
@Override
public void increment(long incr) {
value += incr;
}
@Override
public void write(DataOutput out) throws IOException {
assert false : "shouldn't be called";
}
@Override
public void readFields(DataInput in) throws IOException {
assert false : "shouldn't be called";
}
@Override
public Counter getUnderlyingCounter() {
return this;
}
}
@SuppressWarnings("unchecked")
public FrameworkCounterGroup(Class<T> enumClass) {
this.enumClass = enumClass;
T[] enums = enumClass.getEnumConstants();
counters = new Object[enums.length];
}
@Override
public String getName() {
return enumClass.getName();
}
@Override
public String getDisplayName() {
if (displayName == null) {
displayName = ResourceBundles.getCounterGroupName(getName(), getName());
}
return displayName;
}
@Override
public void setDisplayName(String displayName) {
this.displayName = displayName;
}
private T valueOf(String name) {
return Enum.valueOf(enumClass, name);
}
@Override
public void addCounter(C counter) {
C ours = findCounter(counter.getName());
if (ours != null) {
ours.setValue(counter.getValue());
} else {
LOG.warn(counter.getName() + "is not a known counter.");
}
}
@Override
public C addCounter(String name, String displayName, long value) {
C counter = findCounter(name);
if (counter != null) {
counter.setValue(value);
} else {
LOG.warn(name + "is not a known counter.");
}
return counter;
}
@Override
public C findCounter(String counterName, String displayName) {
return findCounter(counterName);
}
@Override
public C findCounter(String counterName, boolean create) {
try {
return findCounter(valueOf(counterName));
}
catch (Exception e) {
if (create) throw new IllegalArgumentException(e);
return null;
}
}
@Override
public C findCounter(String counterName) {
try {
T enumValue = valueOf(counterName);
return findCounter(enumValue);
} catch (IllegalArgumentException e) {
LOG.warn(counterName + " is not a recognized counter.");
return null;
}
}
@SuppressWarnings("unchecked")
private C findCounter(T key) {
int i = key.ordinal();
if (counters[i] == null) {
counters[i] = newCounter(key);
}
return (C) counters[i];
}
/**
* Abstract factory method for new framework counter
* @param key for the enum value of a counter
* @return a new counter for the key
*/
protected abstract C newCounter(T key);
@Override
public int size() {
int n = 0;
for (int i = 0; i < counters.length; ++i) {
if (counters[i] != null) ++n;
}
return n;
}
@Override
@SuppressWarnings("rawtypes")
public void incrAllCounters(CounterGroupBase<C> other) {
if (checkNotNull(other, "other counter group")
instanceof FrameworkCounterGroup<?, ?>) {
for (Counter counter : other) {
C c = findCounter(((FrameworkCounter) counter).key.name());
if (c != null) {
c.increment(counter.getValue());
}
}
}
}
/**
* FrameworkGroup ::= #counter (key value)*
*/
@Override
@SuppressWarnings("unchecked")
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, size());
for (int i = 0; i < counters.length; ++i) {
Counter counter = (C) counters[i];
if (counter != null) {
WritableUtils.writeVInt(out, i);
WritableUtils.writeVLong(out, counter.getValue());
}
}
}
@Override
public void readFields(DataInput in) throws IOException {
clear();
int len = WritableUtils.readVInt(in);
T[] enums = enumClass.getEnumConstants();
for (int i = 0; i < len; ++i) {
int ord = WritableUtils.readVInt(in);
Counter counter = newCounter(enums[ord]);
counter.setValue(WritableUtils.readVLong(in));
counters[ord] = counter;
}
}
private void clear() {
for (int i = 0; i < counters.length; ++i) {
counters[i] = null;
}
}
@Override
public Iterator<C> iterator() {
return new AbstractIterator<C>() {
int i = 0;
@Override
protected C computeNext() {
while (i < counters.length) {
@SuppressWarnings("unchecked")
C counter = (C) counters[i++];
if (counter != null) return counter;
}
return endOfData();
}
};
}
@Override
public boolean equals(Object genericRight) {
if (genericRight instanceof CounterGroupBase<?>) {
@SuppressWarnings("unchecked")
CounterGroupBase<C> right = (CounterGroupBase<C>) genericRight;
return Iterators.elementsEqual(iterator(), right.iterator());
}
return false;
}
@Override
public synchronized int hashCode() {
// need to be deep as counters is an array
return Arrays.deepHashCode(new Object[]{enumClass, counters, displayName});
}
}
| 8,108 | 25.586885 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.counters;
import com.google.common.base.Objects;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.mapreduce.Counter;
/**
* An abstract counter class to provide common implementation of
* the counter interface in both mapred and mapreduce packages.
*/
@InterfaceAudience.Private
public abstract class AbstractCounter implements Counter {
@Override @Deprecated
public void setDisplayName(String name) {}
@Override
public synchronized boolean equals(Object genericRight) {
if (genericRight instanceof Counter) {
synchronized (genericRight) {
Counter right = (Counter) genericRight;
return getName().equals(right.getName()) &&
getDisplayName().equals(right.getDisplayName()) &&
getValue() == right.getValue();
}
}
return false;
}
@Override
public synchronized int hashCode() {
return Objects.hashCode(getName(), getDisplayName(), getValue());
}
}
| 1,819 | 33.339623 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/GenericCounter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.counters;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.util.StringInterner;
/**
* A generic counter implementation
*/
@InterfaceAudience.Private
public class GenericCounter extends AbstractCounter {
private String name;
private String displayName;
private long value = 0;
public GenericCounter() {
// mostly for readFields
}
public GenericCounter(String name, String displayName) {
this.name = name;
this.displayName = displayName;
}
public GenericCounter(String name, String displayName, long value) {
this.name = name;
this.displayName = displayName;
this.value = value;
}
@Override @Deprecated
public synchronized void setDisplayName(String displayName) {
this.displayName = displayName;
}
@Override
public synchronized void readFields(DataInput in) throws IOException {
name = StringInterner.weakIntern(Text.readString(in));
displayName = in.readBoolean() ?
StringInterner.weakIntern(Text.readString(in)) : name;
value = WritableUtils.readVLong(in);
}
/**
* GenericCounter ::= keyName isDistinctDisplayName [displayName] value
*/
@Override
public synchronized void write(DataOutput out) throws IOException {
Text.writeString(out, name);
boolean distinctDisplayName = ! name.equals(displayName);
out.writeBoolean(distinctDisplayName);
if (distinctDisplayName) {
Text.writeString(out, displayName);
}
WritableUtils.writeVLong(out, value);
}
@Override
public synchronized String getName() {
return name;
}
@Override
public synchronized String getDisplayName() {
return displayName;
}
@Override
public synchronized long getValue() {
return value;
}
@Override
public synchronized void setValue(long value) {
this.value = value;
}
@Override
public synchronized void increment(long incr) {
value += incr;
}
@Override
public Counter getUnderlyingCounter() {
return this;
}
}
| 3,056 | 26.053097 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounterGroup.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.counters;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Iterator;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.util.ResourceBundles;
import org.apache.hadoop.util.StringInterner;
import com.google.common.collect.Iterators;
/**
* An abstract class to provide common implementation of the
* generic counter group in both mapred and mapreduce package.
*
* @param <T> type of the counter for the group
*/
@InterfaceAudience.Private
public abstract class AbstractCounterGroup<T extends Counter>
implements CounterGroupBase<T> {
private final String name;
private String displayName;
private final ConcurrentMap<String, T> counters =
new ConcurrentSkipListMap<String, T>();
private final Limits limits;
public AbstractCounterGroup(String name, String displayName,
Limits limits) {
this.name = name;
this.displayName = displayName;
this.limits = limits;
}
@Override
public String getName() {
return name;
}
@Override
public synchronized String getDisplayName() {
return displayName;
}
@Override
public synchronized void setDisplayName(String displayName) {
this.displayName = displayName;
}
@Override
public synchronized void addCounter(T counter) {
counters.put(counter.getName(), counter);
limits.incrCounters();
}
@Override
public synchronized T addCounter(String counterName, String displayName,
long value) {
String saveName = Limits.filterCounterName(counterName);
T counter = findCounterImpl(saveName, false);
if (counter == null) {
return addCounterImpl(saveName, displayName, value);
}
counter.setValue(value);
return counter;
}
private T addCounterImpl(String name, String displayName, long value) {
T counter = newCounter(name, displayName, value);
addCounter(counter);
return counter;
}
@Override
public synchronized T findCounter(String counterName, String displayName) {
// Take lock to avoid two threads not finding a counter and trying to add
// the same counter.
String saveName = Limits.filterCounterName(counterName);
T counter = findCounterImpl(saveName, false);
if (counter == null) {
return addCounterImpl(saveName, displayName, 0);
}
return counter;
}
@Override
public T findCounter(String counterName, boolean create) {
return findCounterImpl(Limits.filterCounterName(counterName), create);
}
// Lock the object. Cannot simply use concurrent constructs on the counters
// data-structure (like putIfAbsent) because of localization, limits etc.
private synchronized T findCounterImpl(String counterName, boolean create) {
T counter = counters.get(counterName);
if (counter == null && create) {
String localized =
ResourceBundles.getCounterName(getName(), counterName, counterName);
return addCounterImpl(counterName, localized, 0);
}
return counter;
}
@Override
public T findCounter(String counterName) {
return findCounter(counterName, true);
}
/**
* Abstract factory method to create a new counter of type T
* @param counterName of the counter
* @param displayName of the counter
* @param value of the counter
* @return a new counter
*/
protected abstract T newCounter(String counterName, String displayName,
long value);
/**
* Abstract factory method to create a new counter of type T
* @return a new counter object
*/
protected abstract T newCounter();
@Override
public Iterator<T> iterator() {
return counters.values().iterator();
}
/**
* GenericGroup ::= displayName #counter counter*
*/
@Override
public synchronized void write(DataOutput out) throws IOException {
Text.writeString(out, displayName);
WritableUtils.writeVInt(out, counters.size());
for(Counter counter: counters.values()) {
counter.write(out);
}
}
@Override
public synchronized void readFields(DataInput in) throws IOException {
displayName = StringInterner.weakIntern(Text.readString(in));
counters.clear();
int size = WritableUtils.readVInt(in);
for (int i = 0; i < size; i++) {
T counter = newCounter();
counter.readFields(in);
counters.put(counter.getName(), counter);
limits.incrCounters();
}
}
@Override
public synchronized int size() {
return counters.size();
}
@Override
public synchronized boolean equals(Object genericRight) {
if (genericRight instanceof CounterGroupBase<?>) {
@SuppressWarnings("unchecked")
CounterGroupBase<T> right = (CounterGroupBase<T>) genericRight;
return Iterators.elementsEqual(iterator(), right.iterator());
}
return false;
}
@Override
public synchronized int hashCode() {
return counters.hashCode();
}
@Override
public void incrAllCounters(CounterGroupBase<T> rightGroup) {
try {
for (Counter right : rightGroup) {
Counter left = findCounter(right.getName(), right.getDisplayName());
left.increment(right.getValue());
}
} catch (LimitExceededException e) {
counters.clear();
throw e;
}
}
}
| 6,385 | 29.122642 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/CounterGroupFactory.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.counters;
import java.util.List;
import java.util.Map;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.FileSystemCounter;
import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.util.ResourceBundles;
/**
* An abstract class to provide common implementation of the
* group factory in both mapred and mapreduce packages.
*
* @param <C> type of the counter
* @param <G> type of the group
*/
@InterfaceAudience.Private
public abstract class CounterGroupFactory<C extends Counter,
G extends CounterGroupBase<C>> {
public interface FrameworkGroupFactory<F> {
F newGroup(String name);
}
// Integer mapping (for serialization) for framework groups
private static final Map<String, Integer> s2i = Maps.newHashMap();
private static final List<String> i2s = Lists.newArrayList();
private static final int VERSION = 1;
private static final String FS_GROUP_NAME = FileSystemCounter.class.getName();
private final Map<String, FrameworkGroupFactory<G>> fmap = Maps.newHashMap();
{
// Add builtin counter class here and the version when changed.
addFrameworkGroup(TaskCounter.class);
addFrameworkGroup(JobCounter.class);
}
// Initialize the framework counter group mapping
private synchronized <T extends Enum<T>>
void addFrameworkGroup(final Class<T> cls) {
updateFrameworkGroupMapping(cls);
fmap.put(cls.getName(), newFrameworkGroupFactory(cls));
}
// Update static mappings (c2i, i2s) of framework groups
private static synchronized void updateFrameworkGroupMapping(Class<?> cls) {
String name = cls.getName();
Integer i = s2i.get(name);
if (i != null) return;
i2s.add(name);
s2i.put(name, i2s.size() - 1);
}
/**
* Required override to return a new framework group factory
* @param <T> type of the counter enum class
* @param cls the counter enum class
* @return a new framework group factory
*/
protected abstract <T extends Enum<T>>
FrameworkGroupFactory<G> newFrameworkGroupFactory(Class<T> cls);
/**
* Create a new counter group
* @param name of the group
* @param limits the counters limits policy object
* @return a new counter group
*/
public G newGroup(String name, Limits limits) {
return newGroup(name, ResourceBundles.getCounterGroupName(name, name),
limits);
}
/**
* Create a new counter group
* @param name of the group
* @param displayName of the group
* @param limits the counters limits policy object
* @return a new counter group
*/
public G newGroup(String name, String displayName, Limits limits) {
FrameworkGroupFactory<G> gf = fmap.get(name);
if (gf != null) return gf.newGroup(name);
if (name.equals(FS_GROUP_NAME)) {
return newFileSystemGroup();
} else if (s2i.get(name) != null) {
return newFrameworkGroup(s2i.get(name));
}
return newGenericGroup(name, displayName, limits);
}
/**
* Create a new framework group
* @param id of the group
* @return a new framework group
*/
public G newFrameworkGroup(int id) {
String name;
synchronized(CounterGroupFactory.class) {
if (id < 0 || id >= i2s.size()) throwBadFrameGroupIdException(id);
name = i2s.get(id); // should not throw here.
}
FrameworkGroupFactory<G> gf = fmap.get(name);
if (gf == null) throwBadFrameGroupIdException(id);
return gf.newGroup(name);
}
/**
* Get the id of a framework group
* @param name of the group
* @return the framework group id
*/
public static synchronized int getFrameworkGroupId(String name) {
Integer i = s2i.get(name);
if (i == null) throwBadFrameworkGroupNameException(name);
return i;
}
/**
* @return the counter factory version
*/
public int version() {
return VERSION;
}
/**
* Check whether a group name is a name of a framework group (including
* the filesystem group).
*
* @param name to check
* @return true for framework group names
*/
public static synchronized boolean isFrameworkGroup(String name) {
return s2i.get(name) != null || name.equals(FS_GROUP_NAME);
}
private static void throwBadFrameGroupIdException(int id) {
throw new IllegalArgumentException("bad framework group id: "+ id);
}
private static void throwBadFrameworkGroupNameException(String name) {
throw new IllegalArgumentException("bad framework group name: "+ name);
}
/**
* Abstract factory method to create a generic (vs framework) counter group
* @param name of the group
* @param displayName of the group
* @param limits limits of the counters
* @return a new generic counter group
*/
protected abstract G newGenericGroup(String name, String displayName,
Limits limits);
/**
* Abstract factory method to create a file system counter group
* @return a new file system counter group
*/
protected abstract G newFileSystemGroup();
}
| 6,080 | 31.87027 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.counters;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.Iterator;
import java.util.Map;
import com.google.common.base.Joiner;
import static com.google.common.base.Preconditions.*;
import com.google.common.collect.AbstractIterator;
import com.google.common.collect.Iterators;
import com.google.common.collect.Maps;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.FileSystemCounter;
import org.apache.hadoop.mapreduce.util.ResourceBundles;
import org.apache.hadoop.util.StringUtils;
/**
* An abstract class to provide common implementation of the filesystem
* counter group in both mapred and mapreduce packages.
*
* @param <C> the type of the Counter for the group
*/
@InterfaceAudience.Private
public abstract class FileSystemCounterGroup<C extends Counter>
implements CounterGroupBase<C> {
static final int MAX_NUM_SCHEMES = 100; // intern/sanity check
static final ConcurrentMap<String, String> schemes = Maps.newConcurrentMap();
private static final Log LOG = LogFactory.getLog(FileSystemCounterGroup.class);
// C[] would need Array.newInstance which requires a Class<C> reference.
// Just a few local casts probably worth not having to carry it around.
private final Map<String, Object[]> map =
new ConcurrentSkipListMap<String, Object[]>();
private String displayName;
private static final Joiner NAME_JOINER = Joiner.on('_');
private static final Joiner DISP_JOINER = Joiner.on(": ");
@InterfaceAudience.Private
public static class FSCounter extends AbstractCounter {
final String scheme;
final FileSystemCounter key;
private long value;
public FSCounter(String scheme, FileSystemCounter ref) {
this.scheme = scheme;
key = ref;
}
@Private
public String getScheme() {
return scheme;
}
@Private
public FileSystemCounter getFileSystemCounter() {
return key;
}
@Override
public String getName() {
return NAME_JOINER.join(scheme, key.name());
}
@Override
public String getDisplayName() {
return DISP_JOINER.join(scheme, localizeCounterName(key.name()));
}
protected String localizeCounterName(String counterName) {
return ResourceBundles.getCounterName(FileSystemCounter.class.getName(),
counterName, counterName);
}
@Override
public long getValue() {
return value;
}
@Override
public void setValue(long value) {
this.value = value;
}
@Override
public void increment(long incr) {
value += incr;
}
@Override
public void write(DataOutput out) throws IOException {
assert false : "shouldn't be called";
}
@Override
public void readFields(DataInput in) throws IOException {
assert false : "shouldn't be called";
}
@Override
public Counter getUnderlyingCounter() {
return this;
}
}
@Override
public String getName() {
return FileSystemCounter.class.getName();
}
@Override
public String getDisplayName() {
if (displayName == null) {
displayName = ResourceBundles.getCounterGroupName(getName(),
"File System Counters");
}
return displayName;
}
@Override
public void setDisplayName(String displayName) {
this.displayName = displayName;
}
@Override
public void addCounter(C counter) {
C ours;
if (counter instanceof FileSystemCounterGroup.FSCounter) {
FSCounter c = (FSCounter) counter;
ours = findCounter(c.scheme, c.key);
}
else {
ours = findCounter(counter.getName());
}
if (ours != null) {
ours.setValue(counter.getValue());
}
}
@Override
public C addCounter(String name, String displayName, long value) {
C counter = findCounter(name);
if (counter != null) {
counter.setValue(value);
}
return counter;
}
// Parse generic counter name into [scheme, key]
private String[] parseCounterName(String counterName) {
int schemeEnd = counterName.indexOf('_');
if (schemeEnd < 0) {
throw new IllegalArgumentException("bad fs counter name");
}
return new String[]{counterName.substring(0, schemeEnd),
counterName.substring(schemeEnd + 1)};
}
@Override
public C findCounter(String counterName, String displayName) {
return findCounter(counterName);
}
@Override
public C findCounter(String counterName, boolean create) {
try {
String[] pair = parseCounterName(counterName);
return findCounter(pair[0], FileSystemCounter.valueOf(pair[1]));
}
catch (Exception e) {
if (create) throw new IllegalArgumentException(e);
LOG.warn(counterName + " is not a recognized counter.");
return null;
}
}
@Override
public C findCounter(String counterName) {
return findCounter(counterName, false);
}
@SuppressWarnings("unchecked")
public synchronized C findCounter(String scheme, FileSystemCounter key) {
final String canonicalScheme = checkScheme(scheme);
Object[] counters = map.get(canonicalScheme);
int ord = key.ordinal();
if (counters == null) {
counters = new Object[FileSystemCounter.values().length];
map.put(canonicalScheme, counters);
counters[ord] = newCounter(canonicalScheme, key);
}
else if (counters[ord] == null) {
counters[ord] = newCounter(canonicalScheme, key);
}
return (C) counters[ord];
}
private String checkScheme(String scheme) {
String fixed = StringUtils.toUpperCase(scheme);
String interned = schemes.putIfAbsent(fixed, fixed);
if (schemes.size() > MAX_NUM_SCHEMES) {
// mistakes or abuses
throw new IllegalArgumentException("too many schemes? "+ schemes.size() +
" when process scheme: "+ scheme);
}
return interned == null ? fixed : interned;
}
/**
* Abstract factory method to create a file system counter
* @param scheme of the file system
* @param key the enum of the file system counter
* @return a new file system counter
*/
protected abstract C newCounter(String scheme, FileSystemCounter key);
@Override
public int size() {
int n = 0;
for (Object[] counters : map.values()) {
n += numSetCounters(counters);
}
return n;
}
@Override
@SuppressWarnings("unchecked")
public void incrAllCounters(CounterGroupBase<C> other) {
if (checkNotNull(other.getUnderlyingGroup(), "other group")
instanceof FileSystemCounterGroup<?>) {
for (Counter counter : other) {
FSCounter c = (FSCounter) ((Counter)counter).getUnderlyingCounter();
findCounter(c.scheme, c.key) .increment(counter.getValue());
}
}
}
/**
* FileSystemGroup ::= #scheme (scheme #counter (key value)*)*
*/
@Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, map.size()); // #scheme
for (Map.Entry<String, Object[]> entry : map.entrySet()) {
WritableUtils.writeString(out, entry.getKey()); // scheme
// #counter for the above scheme
WritableUtils.writeVInt(out, numSetCounters(entry.getValue()));
for (Object counter : entry.getValue()) {
if (counter == null) continue;
@SuppressWarnings("unchecked")
FSCounter c = (FSCounter) ((Counter)counter).getUnderlyingCounter();
WritableUtils.writeVInt(out, c.key.ordinal()); // key
WritableUtils.writeVLong(out, c.getValue()); // value
}
}
}
private int numSetCounters(Object[] counters) {
int n = 0;
for (Object counter : counters) if (counter != null) ++n;
return n;
}
@Override
public void readFields(DataInput in) throws IOException {
int numSchemes = WritableUtils.readVInt(in); // #scheme
FileSystemCounter[] enums = FileSystemCounter.values();
for (int i = 0; i < numSchemes; ++i) {
String scheme = WritableUtils.readString(in); // scheme
int numCounters = WritableUtils.readVInt(in); // #counter
for (int j = 0; j < numCounters; ++j) {
findCounter(scheme, enums[WritableUtils.readVInt(in)]) // key
.setValue(WritableUtils.readVLong(in)); // value
}
}
}
@Override
public Iterator<C> iterator() {
return new AbstractIterator<C>() {
Iterator<Object[]> it = map.values().iterator();
Object[] counters = it.hasNext() ? it.next() : null;
int i = 0;
@Override
protected C computeNext() {
while (counters != null) {
while (i < counters.length) {
@SuppressWarnings("unchecked")
C counter = (C) counters[i++];
if (counter != null) return counter;
}
i = 0;
counters = it.hasNext() ? it.next() : null;
}
return endOfData();
}
};
}
@Override
public synchronized boolean equals(Object genericRight) {
if (genericRight instanceof CounterGroupBase<?>) {
@SuppressWarnings("unchecked")
CounterGroupBase<C> right = (CounterGroupBase<C>) genericRight;
return Iterators.elementsEqual(iterator(), right.iterator());
}
return false;
}
@Override
public synchronized int hashCode() {
// need to be deep as counters is an array
int hash = FileSystemCounter.class.hashCode();
for (Object[] counters : map.values()) {
if (counters != null) hash ^= Arrays.hashCode(counters);
}
return hash;
}
}
| 10,776 | 29.703704 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/LimitExceededException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.counters;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.mapred.Counters.CountersExceededException;
@InterfaceAudience.Private
public class LimitExceededException extends CountersExceededException {
private static final long serialVersionUID = 1L;
public LimitExceededException(String msg) {
super(msg);
}
// Only allows chaining of related exceptions
public LimitExceededException(LimitExceededException cause) {
super(cause);
}
}
| 1,337 | 34.210526 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.counters;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;
import static org.apache.hadoop.mapreduce.MRJobConfig.*;
@InterfaceAudience.Private
public class Limits {
private int totalCounters;
private LimitExceededException firstViolation;
private static boolean isInited;
private static int GROUP_NAME_MAX;
private static int COUNTER_NAME_MAX;
private static int GROUPS_MAX;
private static int COUNTERS_MAX;
public synchronized static void init(Configuration conf) {
if (!isInited) {
if (conf == null) {
conf = new JobConf();
}
GROUP_NAME_MAX = conf.getInt(COUNTER_GROUP_NAME_MAX_KEY,
COUNTER_GROUP_NAME_MAX_DEFAULT);
COUNTER_NAME_MAX = conf.getInt(COUNTER_NAME_MAX_KEY,
COUNTER_NAME_MAX_DEFAULT);
GROUPS_MAX = conf.getInt(COUNTER_GROUPS_MAX_KEY, COUNTER_GROUPS_MAX_DEFAULT);
COUNTERS_MAX = conf.getInt(COUNTERS_MAX_KEY, COUNTERS_MAX_DEFAULT);
}
isInited = true;
}
public static int getGroupNameMax() {
if (!isInited) {
init(null);
}
return GROUP_NAME_MAX;
}
public static int getCounterNameMax() {
if (!isInited) {
init(null);
}
return COUNTER_NAME_MAX;
}
public static int getGroupsMax() {
if (!isInited) {
init(null);
}
return GROUPS_MAX;
}
public static int getCountersMax() {
if (!isInited) {
init(null);
}
return COUNTERS_MAX;
}
public static String filterName(String name, int maxLen) {
return name.length() > maxLen ? name.substring(0, maxLen - 1) : name;
}
public static String filterCounterName(String name) {
return filterName(name, getCounterNameMax());
}
public static String filterGroupName(String name) {
return filterName(name, getGroupNameMax());
}
public synchronized void checkCounters(int size) {
if (firstViolation != null) {
throw new LimitExceededException(firstViolation);
}
int countersMax = getCountersMax();
if (size > countersMax) {
firstViolation = new LimitExceededException("Too many counters: "+ size +
" max="+ countersMax);
throw firstViolation;
}
}
public synchronized void incrCounters() {
checkCounters(totalCounters + 1);
++totalCounters;
}
public synchronized void checkGroups(int size) {
if (firstViolation != null) {
throw new LimitExceededException(firstViolation);
}
int groupsMax = getGroupsMax();
if (size > groupsMax) {
firstViolation = new LimitExceededException("Too many counter groups: "+
size +" max="+ groupsMax);
}
}
public synchronized LimitExceededException violation() {
return firstViolation;
}
// This allows initialization of global settings and not for an instance
public static synchronized void reset(Configuration conf) {
isInited = false;
init(conf);
}
// This allows resetting of an instance to allow reuse
public synchronized void reset() {
totalCounters = 0;
firstViolation = null;
}
}
| 4,058 | 28.201439 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/CounterGroupBase.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.counters;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Counter;
/**
* The common counter group interface.
*
* @param <T> type of the counter for the group
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface CounterGroupBase<T extends Counter>
extends Writable, Iterable<T> {
/**
* Get the internal name of the group
* @return the internal name
*/
String getName();
/**
* Get the display name of the group.
* @return the human readable name
*/
String getDisplayName();
/**
* Set the display name of the group
* @param displayName of the group
*/
void setDisplayName(String displayName);
/** Add a counter to this group.
* @param counter to add
*/
void addCounter(T counter);
/**
* Add a counter to this group
* @param name of the counter
* @param displayName of the counter
* @param value of the counter
* @return the counter
*/
T addCounter(String name, String displayName, long value);
/**
* Find a counter in the group.
* @param counterName the name of the counter
* @param displayName the display name of the counter
* @return the counter that was found or added
*/
T findCounter(String counterName, String displayName);
/**
* Find a counter in the group
* @param counterName the name of the counter
* @param create create the counter if not found if true
* @return the counter that was found or added or null if create is false
*/
T findCounter(String counterName, boolean create);
/**
* Find a counter in the group.
* @param counterName the name of the counter
* @return the counter that was found or added
*/
T findCounter(String counterName);
/**
* @return the number of counters in this group.
*/
int size();
/**
* Increment all counters by a group of counters
* @param rightGroup the group to be added to this group
*/
void incrAllCounters(CounterGroupBase<T> rightGroup);
@Private
/**
* Exposes the underlying group type if a facade.
* @return the underlying object that this object is wrapping up.
*/
CounterGroupBase<T> getUnderlyingGroup();
}
| 3,224 | 28.318182 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MapFileOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/**
* An {@link org.apache.hadoop.mapreduce.OutputFormat} that writes
* {@link MapFile}s.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MapFileOutputFormat
extends FileOutputFormat<WritableComparable<?>, Writable> {
public RecordWriter<WritableComparable<?>, Writable> getRecordWriter(
TaskAttemptContext context) throws IOException {
Configuration conf = context.getConfiguration();
CompressionCodec codec = null;
CompressionType compressionType = CompressionType.NONE;
if (getCompressOutput(context)) {
// find the kind of compression to do
compressionType = SequenceFileOutputFormat.getOutputCompressionType(context);
// find the right codec
Class<?> codecClass = getOutputCompressorClass(context,
DefaultCodec.class);
codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
}
Path file = getDefaultWorkFile(context, "");
FileSystem fs = file.getFileSystem(conf);
// ignore the progress parameter, since MapFile is local
final MapFile.Writer out =
new MapFile.Writer(conf, fs, file.toString(),
context.getOutputKeyClass().asSubclass(WritableComparable.class),
context.getOutputValueClass().asSubclass(Writable.class),
compressionType, codec, context);
return new RecordWriter<WritableComparable<?>, Writable>() {
public void write(WritableComparable<?> key, Writable value)
throws IOException {
out.append(key, value);
}
public void close(TaskAttemptContext context) throws IOException {
out.close();
}
};
}
/** Open the output generated by this format. */
public static MapFile.Reader[] getReaders(Path dir,
Configuration conf) throws IOException {
FileSystem fs = dir.getFileSystem(conf);
PathFilter filter = new PathFilter() {
@Override
public boolean accept(Path path) {
String name = path.getName();
if (name.startsWith("_") || name.startsWith("."))
return false;
return true;
}
};
Path[] names = FileUtil.stat2Paths(fs.listStatus(dir, filter));
// sort names, so that hash partitioning works
Arrays.sort(names);
MapFile.Reader[] parts = new MapFile.Reader[names.length];
for (int i = 0; i < names.length; i++) {
parts[i] = new MapFile.Reader(fs, names[i].toString(), conf);
}
return parts;
}
/** Get an entry from output generated by this class. */
public static <K extends WritableComparable<?>, V extends Writable>
Writable getEntry(MapFile.Reader[] readers,
Partitioner<K, V> partitioner, K key, V value) throws IOException {
int part = partitioner.getPartition(key, value, readers.length);
return readers[part].get(key, value);
}
}
| 4,574 | 36.5 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/NullOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
* Consume all outputs and put them in /dev/null.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class NullOutputFormat<K, V> extends OutputFormat<K, V> {
@Override
public RecordWriter<K, V>
getRecordWriter(TaskAttemptContext context) {
return new RecordWriter<K, V>(){
public void write(K key, V value) { }
public void close(TaskAttemptContext context) { }
};
}
@Override
public void checkOutputSpecs(JobContext context) { }
@Override
public OutputCommitter getOutputCommitter(TaskAttemptContext context) {
return new OutputCommitter() {
public void abortTask(TaskAttemptContext taskContext) { }
public void cleanupJob(JobContext jobContext) { }
public void commitTask(TaskAttemptContext taskContext) { }
public boolean needsTaskCommit(TaskAttemptContext taskContext) {
return false;
}
public void setupJob(JobContext jobContext) { }
public void setupTask(TaskAttemptContext taskContext) { }
@Override
@Deprecated
public boolean isRecoverySupported() {
return true;
}
@Override
public void recoverTask(TaskAttemptContext taskContext)
throws IOException {
// Nothing to do for recovering the task.
}
};
}
}
| 2,576 | 32.907895 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/TextOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.util.*;
/** An {@link OutputFormat} that writes plain text files. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TextOutputFormat<K, V> extends FileOutputFormat<K, V> {
public static String SEPERATOR = "mapreduce.output.textoutputformat.separator";
protected static class LineRecordWriter<K, V>
extends RecordWriter<K, V> {
private static final String utf8 = "UTF-8";
private static final byte[] newline;
static {
try {
newline = "\n".getBytes(utf8);
} catch (UnsupportedEncodingException uee) {
throw new IllegalArgumentException("can't find " + utf8 + " encoding");
}
}
protected DataOutputStream out;
private final byte[] keyValueSeparator;
public LineRecordWriter(DataOutputStream out, String keyValueSeparator) {
this.out = out;
try {
this.keyValueSeparator = keyValueSeparator.getBytes(utf8);
} catch (UnsupportedEncodingException uee) {
throw new IllegalArgumentException("can't find " + utf8 + " encoding");
}
}
public LineRecordWriter(DataOutputStream out) {
this(out, "\t");
}
/**
* Write the object to the byte stream, handling Text as a special
* case.
* @param o the object to print
* @throws IOException if the write throws, we pass it on
*/
private void writeObject(Object o) throws IOException {
if (o instanceof Text) {
Text to = (Text) o;
out.write(to.getBytes(), 0, to.getLength());
} else {
out.write(o.toString().getBytes(utf8));
}
}
public synchronized void write(K key, V value)
throws IOException {
boolean nullKey = key == null || key instanceof NullWritable;
boolean nullValue = value == null || value instanceof NullWritable;
if (nullKey && nullValue) {
return;
}
if (!nullKey) {
writeObject(key);
}
if (!(nullKey || nullValue)) {
out.write(keyValueSeparator);
}
if (!nullValue) {
writeObject(value);
}
out.write(newline);
}
public synchronized
void close(TaskAttemptContext context) throws IOException {
out.close();
}
}
public RecordWriter<K, V>
getRecordWriter(TaskAttemptContext job
) throws IOException, InterruptedException {
Configuration conf = job.getConfiguration();
boolean isCompressed = getCompressOutput(job);
String keyValueSeparator= conf.get(SEPERATOR, "\t");
CompressionCodec codec = null;
String extension = "";
if (isCompressed) {
Class<? extends CompressionCodec> codecClass =
getOutputCompressorClass(job, GzipCodec.class);
codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
extension = codec.getDefaultExtension();
}
Path file = getDefaultWorkFile(job, extension);
FileSystem fs = file.getFileSystem(conf);
if (!isCompressed) {
FSDataOutputStream fileOut = fs.create(file, false);
return new LineRecordWriter<K, V>(fileOut, keyValueSeparator);
} else {
FSDataOutputStream fileOut = fs.create(file, false);
return new LineRecordWriter<K, V>(new DataOutputStream
(codec.createOutputStream(fileOut)),
keyValueSeparator);
}
}
}
| 4,995 | 33.937063 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PartialFileOutputCommitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.task.annotation.Checkpointable;
import com.google.common.annotations.VisibleForTesting;
/** An {@link OutputCommitter} that commits files specified
* in job output directory i.e. ${mapreduce.output.fileoutputformat.outputdir}.
**/
@Checkpointable
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class PartialFileOutputCommitter
extends FileOutputCommitter implements PartialOutputCommitter {
private static final Log LOG =
LogFactory.getLog(PartialFileOutputCommitter.class);
public PartialFileOutputCommitter(Path outputPath,
TaskAttemptContext context) throws IOException {
super(outputPath, context);
}
public PartialFileOutputCommitter(Path outputPath,
JobContext context) throws IOException {
super(outputPath, context);
}
@Override
public Path getCommittedTaskPath(int appAttemptId, TaskAttemptContext context) {
return new Path(getJobAttemptPath(appAttemptId),
String.valueOf(context.getTaskAttemptID()));
}
@VisibleForTesting
FileSystem fsFor(Path p, Configuration conf) throws IOException {
return p.getFileSystem(conf);
}
@Override
public void cleanUpPartialOutputForTask(TaskAttemptContext context)
throws IOException {
// we double check this is never invoked from a non-preemptable subclass.
// This should never happen, since the invoking codes is checking it too,
// but it is safer to double check. Errors handling this would produce
// inconsistent output.
if (!this.getClass().isAnnotationPresent(Checkpointable.class)) {
throw new IllegalStateException("Invoking cleanUpPartialOutputForTask() " +
"from non @Preemptable class");
}
FileSystem fs =
fsFor(getTaskAttemptPath(context), context.getConfiguration());
LOG.info("cleanUpPartialOutputForTask: removing everything belonging to " +
context.getTaskAttemptID().getTaskID() + " in: " +
getCommittedTaskPath(context).getParent());
final TaskAttemptID taid = context.getTaskAttemptID();
final TaskID tid = taid.getTaskID();
Path pCommit = getCommittedTaskPath(context).getParent();
// remove any committed output
for (int i = 0; i < taid.getId(); ++i) {
TaskAttemptID oldId = new TaskAttemptID(tid, i);
Path pTask = new Path(pCommit, oldId.toString());
if (fs.exists(pTask) && !fs.delete(pTask, true)) {
throw new IOException("Failed to delete " + pTask);
}
}
}
}
| 4,004 | 36.429907 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.Reducer.Context;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.hadoop.util.ReflectionUtils;
import java.io.IOException;
import java.util.*;
/**
* The MultipleOutputs class simplifies writing output data
* to multiple outputs
*
* <p>
* Case one: writing to additional outputs other than the job default output.
*
* Each additional output, or named output, may be configured with its own
* <code>OutputFormat</code>, with its own key class and with its own value
* class.
* </p>
*
* <p>
* Case two: to write data to different files provided by user
* </p>
*
* <p>
* MultipleOutputs supports counters, by default they are disabled. The
* counters group is the {@link MultipleOutputs} class name. The names of the
* counters are the same as the output name. These count the number records
* written to each output name.
* </p>
*
* Usage pattern for job submission:
* <pre>
*
* Job job = new Job();
*
* FileInputFormat.setInputPath(job, inDir);
* FileOutputFormat.setOutputPath(job, outDir);
*
* job.setMapperClass(MOMap.class);
* job.setReducerClass(MOReduce.class);
* ...
*
* // Defines additional single text based output 'text' for the job
* MultipleOutputs.addNamedOutput(job, "text", TextOutputFormat.class,
* LongWritable.class, Text.class);
*
* // Defines additional sequence-file based output 'sequence' for the job
* MultipleOutputs.addNamedOutput(job, "seq",
* SequenceFileOutputFormat.class,
* LongWritable.class, Text.class);
* ...
*
* job.waitForCompletion(true);
* ...
* </pre>
* <p>
* Usage in Reducer:
* <pre>
* <K, V> String generateFileName(K k, V v) {
* return k.toString() + "_" + v.toString();
* }
*
* public class MOReduce extends
* Reducer<WritableComparable, Writable,WritableComparable, Writable> {
* private MultipleOutputs mos;
* public void setup(Context context) {
* ...
* mos = new MultipleOutputs(context);
* }
*
* public void reduce(WritableComparable key, Iterator<Writable> values,
* Context context)
* throws IOException {
* ...
* mos.write("text", , key, new Text("Hello"));
* mos.write("seq", LongWritable(1), new Text("Bye"), "seq_a");
* mos.write("seq", LongWritable(2), key, new Text("Chau"), "seq_b");
* mos.write(key, new Text("value"), generateFileName(key, new Text("value")));
* ...
* }
*
* public void cleanup(Context) throws IOException {
* mos.close();
* ...
* }
*
* }
* </pre>
*
* <p>
* When used in conjuction with org.apache.hadoop.mapreduce.lib.output.LazyOutputFormat,
* MultipleOutputs can mimic the behaviour of MultipleTextOutputFormat and MultipleSequenceFileOutputFormat
* from the old Hadoop API - ie, output can be written from the Reducer to more than one location.
* </p>
*
* <p>
* Use <code>MultipleOutputs.write(KEYOUT key, VALUEOUT value, String baseOutputPath)</code> to write key and
* value to a path specified by <code>baseOutputPath</code>, with no need to specify a named output:
* </p>
*
* <pre>
* private MultipleOutputs<Text, Text> out;
*
* public void setup(Context context) {
* out = new MultipleOutputs<Text, Text>(context);
* ...
* }
*
* public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
* for (Text t : values) {
* out.write(key, t, generateFileName(<<i>parameter list...</i>>));
* }
* }
*
* protected void cleanup(Context context) throws IOException, InterruptedException {
* out.close();
* }
* </pre>
*
* <p>
* Use your own code in <code>generateFileName()</code> to create a custom path to your results.
* '/' characters in <code>baseOutputPath</code> will be translated into directory levels in your file system.
* Also, append your custom-generated path with "part" or similar, otherwise your output will be -00000, -00001 etc.
* No call to <code>context.write()</code> is necessary. See example <code>generateFileName()</code> code below.
* </p>
*
* <pre>
* private String generateFileName(Text k) {
* // expect Text k in format "Surname|Forename"
* String[] kStr = k.toString().split("\\|");
*
* String sName = kStr[0];
* String fName = kStr[1];
*
* // example for k = Smith|John
* // output written to /user/hadoop/path/to/output/Smith/John-r-00000 (etc)
* return sName + "/" + fName;
* }
* </pre>
*
* <p>
* Using MultipleOutputs in this way will still create zero-sized default output, eg part-00000.
* To prevent this use <code>LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class);</code>
* instead of <code>job.setOutputFormatClass(TextOutputFormat.class);</code> in your Hadoop job configuration.
* </p>
*
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MultipleOutputs<KEYOUT, VALUEOUT> {
private static final String MULTIPLE_OUTPUTS = "mapreduce.multipleoutputs";
private static final String MO_PREFIX =
"mapreduce.multipleoutputs.namedOutput.";
private static final String FORMAT = ".format";
private static final String KEY = ".key";
private static final String VALUE = ".value";
private static final String COUNTERS_ENABLED =
"mapreduce.multipleoutputs.counters";
/**
* Counters group used by the counters of MultipleOutputs.
*/
private static final String COUNTERS_GROUP = MultipleOutputs.class.getName();
/**
* Cache for the taskContexts
*/
private Map<String, TaskAttemptContext> taskContexts = new HashMap<String, TaskAttemptContext>();
/**
* Cached TaskAttemptContext which uses the job's configured settings
*/
private TaskAttemptContext jobOutputFormatContext;
/**
* Checks if a named output name is valid token.
*
* @param namedOutput named output Name
* @throws IllegalArgumentException if the output name is not valid.
*/
private static void checkTokenName(String namedOutput) {
if (namedOutput == null || namedOutput.length() == 0) {
throw new IllegalArgumentException(
"Name cannot be NULL or emtpy");
}
for (char ch : namedOutput.toCharArray()) {
if ((ch >= 'A') && (ch <= 'Z')) {
continue;
}
if ((ch >= 'a') && (ch <= 'z')) {
continue;
}
if ((ch >= '0') && (ch <= '9')) {
continue;
}
throw new IllegalArgumentException(
"Name cannot be have a '" + ch + "' char");
}
}
/**
* Checks if output name is valid.
*
* name cannot be the name used for the default output
* @param outputPath base output Name
* @throws IllegalArgumentException if the output name is not valid.
*/
private static void checkBaseOutputPath(String outputPath) {
if (outputPath.equals(FileOutputFormat.PART)) {
throw new IllegalArgumentException("output name cannot be 'part'");
}
}
/**
* Checks if a named output name is valid.
*
* @param namedOutput named output Name
* @throws IllegalArgumentException if the output name is not valid.
*/
private static void checkNamedOutputName(JobContext job,
String namedOutput, boolean alreadyDefined) {
checkTokenName(namedOutput);
checkBaseOutputPath(namedOutput);
List<String> definedChannels = getNamedOutputsList(job);
if (alreadyDefined && definedChannels.contains(namedOutput)) {
throw new IllegalArgumentException("Named output '" + namedOutput +
"' already alreadyDefined");
} else if (!alreadyDefined && !definedChannels.contains(namedOutput)) {
throw new IllegalArgumentException("Named output '" + namedOutput +
"' not defined");
}
}
// Returns list of channel names.
private static List<String> getNamedOutputsList(JobContext job) {
List<String> names = new ArrayList<String>();
StringTokenizer st = new StringTokenizer(
job.getConfiguration().get(MULTIPLE_OUTPUTS, ""), " ");
while (st.hasMoreTokens()) {
names.add(st.nextToken());
}
return names;
}
// Returns the named output OutputFormat.
@SuppressWarnings("unchecked")
private static Class<? extends OutputFormat<?, ?>> getNamedOutputFormatClass(
JobContext job, String namedOutput) {
return (Class<? extends OutputFormat<?, ?>>)
job.getConfiguration().getClass(MO_PREFIX + namedOutput + FORMAT, null,
OutputFormat.class);
}
// Returns the key class for a named output.
private static Class<?> getNamedOutputKeyClass(JobContext job,
String namedOutput) {
return job.getConfiguration().getClass(MO_PREFIX + namedOutput + KEY, null,
Object.class);
}
// Returns the value class for a named output.
private static Class<?> getNamedOutputValueClass(
JobContext job, String namedOutput) {
return job.getConfiguration().getClass(MO_PREFIX + namedOutput + VALUE,
null, Object.class);
}
/**
* Adds a named output for the job.
*
* @param job job to add the named output
* @param namedOutput named output name, it has to be a word, letters
* and numbers only, cannot be the word 'part' as
* that is reserved for the default output.
* @param outputFormatClass OutputFormat class.
* @param keyClass key class
* @param valueClass value class
*/
@SuppressWarnings("unchecked")
public static void addNamedOutput(Job job, String namedOutput,
Class<? extends OutputFormat> outputFormatClass,
Class<?> keyClass, Class<?> valueClass) {
checkNamedOutputName(job, namedOutput, true);
Configuration conf = job.getConfiguration();
conf.set(MULTIPLE_OUTPUTS,
conf.get(MULTIPLE_OUTPUTS, "") + " " + namedOutput);
conf.setClass(MO_PREFIX + namedOutput + FORMAT, outputFormatClass,
OutputFormat.class);
conf.setClass(MO_PREFIX + namedOutput + KEY, keyClass, Object.class);
conf.setClass(MO_PREFIX + namedOutput + VALUE, valueClass, Object.class);
}
/**
* Enables or disables counters for the named outputs.
*
* The counters group is the {@link MultipleOutputs} class name.
* The names of the counters are the same as the named outputs. These
* counters count the number records written to each output name.
* By default these counters are disabled.
*
* @param job job to enable counters
* @param enabled indicates if the counters will be enabled or not.
*/
public static void setCountersEnabled(Job job, boolean enabled) {
job.getConfiguration().setBoolean(COUNTERS_ENABLED, enabled);
}
/**
* Returns if the counters for the named outputs are enabled or not.
* By default these counters are disabled.
*
* @param job the job
* @return TRUE if the counters are enabled, FALSE if they are disabled.
*/
public static boolean getCountersEnabled(JobContext job) {
return job.getConfiguration().getBoolean(COUNTERS_ENABLED, false);
}
/**
* Wraps RecordWriter to increment counters.
*/
@SuppressWarnings("unchecked")
private static class RecordWriterWithCounter extends RecordWriter {
private RecordWriter writer;
private String counterName;
private TaskInputOutputContext context;
public RecordWriterWithCounter(RecordWriter writer, String counterName,
TaskInputOutputContext context) {
this.writer = writer;
this.counterName = counterName;
this.context = context;
}
@SuppressWarnings({"unchecked"})
public void write(Object key, Object value)
throws IOException, InterruptedException {
context.getCounter(COUNTERS_GROUP, counterName).increment(1);
writer.write(key, value);
}
public void close(TaskAttemptContext context)
throws IOException, InterruptedException {
writer.close(context);
}
}
// instance code, to be used from Mapper/Reducer code
private TaskInputOutputContext<?, ?, KEYOUT, VALUEOUT> context;
private Set<String> namedOutputs;
private Map<String, RecordWriter<?, ?>> recordWriters;
private boolean countersEnabled;
/**
* Creates and initializes multiple outputs support,
* it should be instantiated in the Mapper/Reducer setup method.
*
* @param context the TaskInputOutputContext object
*/
public MultipleOutputs(
TaskInputOutputContext<?, ?, KEYOUT, VALUEOUT> context) {
this.context = context;
namedOutputs = Collections.unmodifiableSet(
new HashSet<String>(MultipleOutputs.getNamedOutputsList(context)));
recordWriters = new HashMap<String, RecordWriter<?, ?>>();
countersEnabled = getCountersEnabled(context);
}
/**
* Write key and value to the namedOutput.
*
* Output path is a unique file generated for the namedOutput.
* For example, {namedOutput}-(m|r)-{part-number}
*
* @param namedOutput the named output name
* @param key the key
* @param value the value
*/
@SuppressWarnings("unchecked")
public <K, V> void write(String namedOutput, K key, V value)
throws IOException, InterruptedException {
write(namedOutput, key, value, namedOutput);
}
/**
* Write key and value to baseOutputPath using the namedOutput.
*
* @param namedOutput the named output name
* @param key the key
* @param value the value
* @param baseOutputPath base-output path to write the record to.
* Note: Framework will generate unique filename for the baseOutputPath
*/
@SuppressWarnings("unchecked")
public <K, V> void write(String namedOutput, K key, V value,
String baseOutputPath) throws IOException, InterruptedException {
checkNamedOutputName(context, namedOutput, false);
checkBaseOutputPath(baseOutputPath);
if (!namedOutputs.contains(namedOutput)) {
throw new IllegalArgumentException("Undefined named output '" +
namedOutput + "'");
}
TaskAttemptContext taskContext = getContext(namedOutput);
getRecordWriter(taskContext, baseOutputPath).write(key, value);
}
/**
* Write key value to an output file name.
*
* Gets the record writer from job's output format.
* Job's output format should be a FileOutputFormat.
*
* @param key the key
* @param value the value
* @param baseOutputPath base-output path to write the record to.
* Note: Framework will generate unique filename for the baseOutputPath
*/
@SuppressWarnings("unchecked")
public void write(KEYOUT key, VALUEOUT value, String baseOutputPath)
throws IOException, InterruptedException {
checkBaseOutputPath(baseOutputPath);
if (jobOutputFormatContext == null) {
jobOutputFormatContext =
new TaskAttemptContextImpl(context.getConfiguration(),
context.getTaskAttemptID(),
new WrappedStatusReporter(context));
}
getRecordWriter(jobOutputFormatContext, baseOutputPath).write(key, value);
}
// by being synchronized MultipleOutputTask can be use with a
// MultithreadedMapper.
@SuppressWarnings("unchecked")
private synchronized RecordWriter getRecordWriter(
TaskAttemptContext taskContext, String baseFileName)
throws IOException, InterruptedException {
// look for record-writer in the cache
RecordWriter writer = recordWriters.get(baseFileName);
// If not in cache, create a new one
if (writer == null) {
// get the record writer from context output format
FileOutputFormat.setOutputName(taskContext, baseFileName);
try {
writer = ((OutputFormat) ReflectionUtils.newInstance(
taskContext.getOutputFormatClass(), taskContext.getConfiguration()))
.getRecordWriter(taskContext);
} catch (ClassNotFoundException e) {
throw new IOException(e);
}
// if counters are enabled, wrap the writer with context
// to increment counters
if (countersEnabled) {
writer = new RecordWriterWithCounter(writer, baseFileName, context);
}
// add the record-writer to the cache
recordWriters.put(baseFileName, writer);
}
return writer;
}
// Create a taskAttemptContext for the named output with
// output format and output key/value types put in the context
private TaskAttemptContext getContext(String nameOutput) throws IOException {
TaskAttemptContext taskContext = taskContexts.get(nameOutput);
if (taskContext != null) {
return taskContext;
}
// The following trick leverages the instantiation of a record writer via
// the job thus supporting arbitrary output formats.
Job job = Job.getInstance(context.getConfiguration());
job.setOutputFormatClass(getNamedOutputFormatClass(context, nameOutput));
job.setOutputKeyClass(getNamedOutputKeyClass(context, nameOutput));
job.setOutputValueClass(getNamedOutputValueClass(context, nameOutput));
taskContext = new TaskAttemptContextImpl(job.getConfiguration(), context
.getTaskAttemptID(), new WrappedStatusReporter(context));
taskContexts.put(nameOutput, taskContext);
return taskContext;
}
private static class WrappedStatusReporter extends StatusReporter {
TaskAttemptContext context;
public WrappedStatusReporter(TaskAttemptContext context) {
this.context = context;
}
@Override
public Counter getCounter(Enum<?> name) {
return context.getCounter(name);
}
@Override
public Counter getCounter(String group, String name) {
return context.getCounter(group, name);
}
@Override
public void progress() {
context.progress();
}
@Override
public float getProgress() {
return context.getProgress();
}
@Override
public void setStatus(String status) {
context.setStatus(status);
}
}
/**
* Closes all the opened outputs.
*
* This should be called from cleanup method of map/reduce task.
* If overridden subclasses must invoke <code>super.close()</code> at the
* end of their <code>close()</code>
*
*/
@SuppressWarnings("unchecked")
public void close() throws IOException, InterruptedException {
for (RecordWriter writer : recordWriters.values()) {
writer.close(context);
}
}
}
| 19,557 | 33.55477 | 120 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PartialOutputCommitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
* Interface for an {@link org.apache.hadoop.mapreduce.OutputCommitter}
* implementing partial commit of task output, as during preemption.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface PartialOutputCommitter {
/**
* Remove all previously committed outputs from prior executions of this task.
* @param context Context for cleaning up previously promoted output.
* @throws IOException If cleanup fails, then the state of the task my not be
* well defined.
*/
public void cleanUpPartialOutputForTask(TaskAttemptContext context)
throws IOException;
}
| 1,687 | 38.255814 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/LazyOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.util.ReflectionUtils;
/**
* A Convenience class that creates output lazily.
* Use in conjuction with org.apache.hadoop.mapreduce.lib.output.MultipleOutputs to recreate the
* behaviour of org.apache.hadoop.mapred.lib.MultipleTextOutputFormat (etc) of the old Hadoop API.
* See {@link MultipleOutputs} documentation for more information.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class LazyOutputFormat <K,V> extends FilterOutputFormat<K, V> {
public static String OUTPUT_FORMAT =
"mapreduce.output.lazyoutputformat.outputformat";
/**
* Set the underlying output format for LazyOutputFormat.
* @param job the {@link Job} to modify
* @param theClass the underlying class
*/
@SuppressWarnings("unchecked")
public static void setOutputFormatClass(Job job,
Class<? extends OutputFormat> theClass) {
job.setOutputFormatClass(LazyOutputFormat.class);
job.getConfiguration().setClass(OUTPUT_FORMAT,
theClass, OutputFormat.class);
}
@SuppressWarnings("unchecked")
private void getBaseOutputFormat(Configuration conf)
throws IOException {
baseOut = ((OutputFormat<K, V>) ReflectionUtils.newInstance(
conf.getClass(OUTPUT_FORMAT, null), conf));
if (baseOut == null) {
throw new IOException("Output Format not set for LazyOutputFormat");
}
}
@Override
public RecordWriter<K, V> getRecordWriter(TaskAttemptContext context)
throws IOException, InterruptedException {
if (baseOut == null) {
getBaseOutputFormat(context.getConfiguration());
}
return new LazyRecordWriter<K, V>(baseOut, context);
}
@Override
public void checkOutputSpecs(JobContext context)
throws IOException, InterruptedException {
if (baseOut == null) {
getBaseOutputFormat(context.getConfiguration());
}
super.checkOutputSpecs(context);
}
@Override
public OutputCommitter getOutputCommitter(TaskAttemptContext context)
throws IOException, InterruptedException {
if (baseOut == null) {
getBaseOutputFormat(context.getConfiguration());
}
return super.getOutputCommitter(context);
}
/**
* A convenience class to be used with LazyOutputFormat
*/
private static class LazyRecordWriter<K,V> extends FilterRecordWriter<K,V> {
final OutputFormat<K,V> outputFormat;
final TaskAttemptContext taskContext;
public LazyRecordWriter(OutputFormat<K,V> out,
TaskAttemptContext taskContext)
throws IOException, InterruptedException {
this.outputFormat = out;
this.taskContext = taskContext;
}
@Override
public void write(K key, V value) throws IOException, InterruptedException {
if (rawWriter == null) {
rawWriter = outputFormat.getRecordWriter(taskContext);
}
rawWriter.write(key, value);
}
@Override
public void close(TaskAttemptContext context)
throws IOException, InterruptedException {
if (rawWriter != null) {
rawWriter.close(context);
}
}
}
}
| 4,447 | 33.75 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import java.io.IOException;
import java.text.NumberFormat;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.mapred.FileAlreadyExistsException;
import org.apache.hadoop.mapred.InvalidJobConfException;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.apache.hadoop.mapreduce.security.TokenCache;
/** A base class for {@link OutputFormat}s that read from {@link FileSystem}s.*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class FileOutputFormat<K, V> extends OutputFormat<K, V> {
/** Construct output file names so that, when an output directory listing is
* sorted lexicographically, positions correspond to output partitions.*/
private static final NumberFormat NUMBER_FORMAT = NumberFormat.getInstance();
protected static final String BASE_OUTPUT_NAME = "mapreduce.output.basename";
protected static final String PART = "part";
static {
NUMBER_FORMAT.setMinimumIntegerDigits(5);
NUMBER_FORMAT.setGroupingUsed(false);
}
private FileOutputCommitter committer = null;
public static final String COMPRESS ="mapreduce.output.fileoutputformat.compress";
public static final String COMPRESS_CODEC =
"mapreduce.output.fileoutputformat.compress.codec";
public static final String COMPRESS_TYPE = "mapreduce.output.fileoutputformat.compress.type";
public static final String OUTDIR = "mapreduce.output.fileoutputformat.outputdir";
@Deprecated
public static enum Counter {
BYTES_WRITTEN
}
/**
* Set whether the output of the job is compressed.
* @param job the job to modify
* @param compress should the output of the job be compressed?
*/
public static void setCompressOutput(Job job, boolean compress) {
job.getConfiguration().setBoolean(FileOutputFormat.COMPRESS, compress);
}
/**
* Is the job output compressed?
* @param job the Job to look in
* @return <code>true</code> if the job output should be compressed,
* <code>false</code> otherwise
*/
public static boolean getCompressOutput(JobContext job) {
return job.getConfiguration().getBoolean(
FileOutputFormat.COMPRESS, false);
}
/**
* Set the {@link CompressionCodec} to be used to compress job outputs.
* @param job the job to modify
* @param codecClass the {@link CompressionCodec} to be used to
* compress the job outputs
*/
public static void
setOutputCompressorClass(Job job,
Class<? extends CompressionCodec> codecClass) {
setCompressOutput(job, true);
job.getConfiguration().setClass(FileOutputFormat.COMPRESS_CODEC,
codecClass,
CompressionCodec.class);
}
/**
* Get the {@link CompressionCodec} for compressing the job outputs.
* @param job the {@link Job} to look in
* @param defaultValue the {@link CompressionCodec} to return if not set
* @return the {@link CompressionCodec} to be used to compress the
* job outputs
* @throws IllegalArgumentException if the class was specified, but not found
*/
public static Class<? extends CompressionCodec>
getOutputCompressorClass(JobContext job,
Class<? extends CompressionCodec> defaultValue) {
Class<? extends CompressionCodec> codecClass = defaultValue;
Configuration conf = job.getConfiguration();
String name = conf.get(FileOutputFormat.COMPRESS_CODEC);
if (name != null) {
try {
codecClass =
conf.getClassByName(name).asSubclass(CompressionCodec.class);
} catch (ClassNotFoundException e) {
throw new IllegalArgumentException("Compression codec " + name +
" was not found.", e);
}
}
return codecClass;
}
public abstract RecordWriter<K, V>
getRecordWriter(TaskAttemptContext job
) throws IOException, InterruptedException;
public void checkOutputSpecs(JobContext job
) throws FileAlreadyExistsException, IOException{
// Ensure that the output directory is set and not already there
Path outDir = getOutputPath(job);
if (outDir == null) {
throw new InvalidJobConfException("Output directory not set.");
}
// get delegation token for outDir's file system
TokenCache.obtainTokensForNamenodes(job.getCredentials(),
new Path[] { outDir }, job.getConfiguration());
if (outDir.getFileSystem(job.getConfiguration()).exists(outDir)) {
throw new FileAlreadyExistsException("Output directory " + outDir +
" already exists");
}
}
/**
* Set the {@link Path} of the output directory for the map-reduce job.
*
* @param job The job to modify
* @param outputDir the {@link Path} of the output directory for
* the map-reduce job.
*/
public static void setOutputPath(Job job, Path outputDir) {
try {
outputDir = outputDir.getFileSystem(job.getConfiguration()).makeQualified(
outputDir);
} catch (IOException e) {
// Throw the IOException as a RuntimeException to be compatible with MR1
throw new RuntimeException(e);
}
job.getConfiguration().set(FileOutputFormat.OUTDIR, outputDir.toString());
}
/**
* Get the {@link Path} to the output directory for the map-reduce job.
*
* @return the {@link Path} to the output directory for the map-reduce job.
* @see FileOutputFormat#getWorkOutputPath(TaskInputOutputContext)
*/
public static Path getOutputPath(JobContext job) {
String name = job.getConfiguration().get(FileOutputFormat.OUTDIR);
return name == null ? null: new Path(name);
}
/**
* Get the {@link Path} to the task's temporary output directory
* for the map-reduce job
*
* <b id="SideEffectFiles">Tasks' Side-Effect Files</b>
*
* <p>Some applications need to create/write-to side-files, which differ from
* the actual job-outputs.
*
* <p>In such cases there could be issues with 2 instances of the same TIP
* (running simultaneously e.g. speculative tasks) trying to open/write-to the
* same file (path) on HDFS. Hence the application-writer will have to pick
* unique names per task-attempt (e.g. using the attemptid, say
* <tt>attempt_200709221812_0001_m_000000_0</tt>), not just per TIP.</p>
*
* <p>To get around this the Map-Reduce framework helps the application-writer
* out by maintaining a special
* <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt>
* sub-directory for each task-attempt on HDFS where the output of the
* task-attempt goes. On successful completion of the task-attempt the files
* in the <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt> (only)
* are <i>promoted</i> to <tt>${mapreduce.output.fileoutputformat.outputdir}</tt>. Of course, the
* framework discards the sub-directory of unsuccessful task-attempts. This
* is completely transparent to the application.</p>
*
* <p>The application-writer can take advantage of this by creating any
* side-files required in a work directory during execution
* of his task i.e. via
* {@link #getWorkOutputPath(TaskInputOutputContext)}, and
* the framework will move them out similarly - thus she doesn't have to pick
* unique paths per task-attempt.</p>
*
* <p>The entire discussion holds true for maps of jobs with
* reducer=NONE (i.e. 0 reduces) since output of the map, in that case,
* goes directly to HDFS.</p>
*
* @return the {@link Path} to the task's temporary output directory
* for the map-reduce job.
*/
public static Path getWorkOutputPath(TaskInputOutputContext<?,?,?,?> context
) throws IOException,
InterruptedException {
FileOutputCommitter committer = (FileOutputCommitter)
context.getOutputCommitter();
return committer.getWorkPath();
}
/**
* Helper function to generate a {@link Path} for a file that is unique for
* the task within the job output directory.
*
* <p>The path can be used to create custom files from within the map and
* reduce tasks. The path name will be unique for each task. The path parent
* will be the job output directory.</p>ls
*
* <p>This method uses the {@link #getUniqueFile} method to make the file name
* unique for the task.</p>
*
* @param context the context for the task.
* @param name the name for the file.
* @param extension the extension for the file
* @return a unique path accross all tasks of the job.
*/
public
static Path getPathForWorkFile(TaskInputOutputContext<?,?,?,?> context,
String name,
String extension
) throws IOException, InterruptedException {
return new Path(getWorkOutputPath(context),
getUniqueFile(context, name, extension));
}
/**
* Generate a unique filename, based on the task id, name, and extension
* @param context the task that is calling this
* @param name the base filename
* @param extension the filename extension
* @return a string like $name-[mrsct]-$id$extension
*/
public synchronized static String getUniqueFile(TaskAttemptContext context,
String name,
String extension) {
TaskID taskId = context.getTaskAttemptID().getTaskID();
int partition = taskId.getId();
StringBuilder result = new StringBuilder();
result.append(name);
result.append('-');
result.append(
TaskID.getRepresentingCharacter(taskId.getTaskType()));
result.append('-');
result.append(NUMBER_FORMAT.format(partition));
result.append(extension);
return result.toString();
}
/**
* Get the default path and filename for the output format.
* @param context the task context
* @param extension an extension to add to the filename
* @return a full path $output/_temporary/$taskid/part-[mr]-$id
* @throws IOException
*/
public Path getDefaultWorkFile(TaskAttemptContext context,
String extension) throws IOException{
FileOutputCommitter committer =
(FileOutputCommitter) getOutputCommitter(context);
return new Path(committer.getWorkPath(), getUniqueFile(context,
getOutputName(context), extension));
}
/**
* Get the base output name for the output file.
*/
protected static String getOutputName(JobContext job) {
return job.getConfiguration().get(BASE_OUTPUT_NAME, PART);
}
/**
* Set the base output name for output file to be created.
*/
protected static void setOutputName(JobContext job, String name) {
job.getConfiguration().set(BASE_OUTPUT_NAME, name);
}
public synchronized
OutputCommitter getOutputCommitter(TaskAttemptContext context
) throws IOException {
if (committer == null) {
Path output = getOutputPath(context);
committer = new FileOutputCommitter(output, context);
}
return committer;
}
}
| 12,786 | 39.593651 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormatCounter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
// Counters used by Task classes
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum FileOutputFormatCounter {
BYTES_WRITTEN
}
| 1,120 | 37.655172 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FilterOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
* FilterOutputFormat is a convenience class that wraps OutputFormat.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FilterOutputFormat <K,V> extends OutputFormat<K, V> {
protected OutputFormat<K,V> baseOut;
public FilterOutputFormat() {
this.baseOut = null;
}
/**
* Create a FilterOutputFormat based on the underlying output format.
* @param baseOut the underlying OutputFormat
*/
public FilterOutputFormat(OutputFormat<K,V> baseOut) {
this.baseOut = baseOut;
}
@Override
public RecordWriter<K, V> getRecordWriter(TaskAttemptContext context)
throws IOException, InterruptedException {
return getBaseOut().getRecordWriter(context);
}
@Override
public void checkOutputSpecs(JobContext context)
throws IOException, InterruptedException {
getBaseOut().checkOutputSpecs(context);
}
@Override
public OutputCommitter getOutputCommitter(TaskAttemptContext context)
throws IOException, InterruptedException {
return getBaseOut().getOutputCommitter(context);
}
private OutputFormat<K,V> getBaseOut() throws IOException {
if (baseOut == null) {
throw new IOException("OutputFormat not set for FilterOutputFormat");
}
return baseOut;
}
/**
* <code>FilterRecordWriter</code> is a convenience wrapper
* class that extends the {@link RecordWriter}.
*/
public static class FilterRecordWriter<K,V> extends RecordWriter<K,V> {
protected RecordWriter<K,V> rawWriter = null;
public FilterRecordWriter() {
rawWriter = null;
}
public FilterRecordWriter(RecordWriter<K,V> rwriter) {
this.rawWriter = rwriter;
}
@Override
public void write(K key, V value) throws IOException, InterruptedException {
getRawWriter().write(key, value);
}
@Override
public void close(TaskAttemptContext context)
throws IOException, InterruptedException {
getRawWriter().close(context);
}
private RecordWriter<K,V> getRawWriter() throws IOException {
if (rawWriter == null) {
throw new IOException("Record Writer not set for FilterRecordWriter");
}
return rawWriter;
}
}
}
| 3,454 | 29.848214 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
/** An {@link OutputCommitter} that commits files specified
* in job output directory i.e. ${mapreduce.output.fileoutputformat.outputdir}.
**/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FileOutputCommitter extends OutputCommitter {
private static final Log LOG = LogFactory.getLog(FileOutputCommitter.class);
/**
* Name of directory where pending data is placed. Data that has not been
* committed yet.
*/
public static final String PENDING_DIR_NAME = "_temporary";
/**
* Temporary directory name
*
* The static variable to be compatible with M/R 1.x
*/
@Deprecated
protected static final String TEMP_DIR_NAME = PENDING_DIR_NAME;
public static final String SUCCEEDED_FILE_NAME = "_SUCCESS";
public static final String SUCCESSFUL_JOB_OUTPUT_DIR_MARKER =
"mapreduce.fileoutputcommitter.marksuccessfuljobs";
public static final String FILEOUTPUTCOMMITTER_ALGORITHM_VERSION =
"mapreduce.fileoutputcommitter.algorithm.version";
public static final int FILEOUTPUTCOMMITTER_ALGORITHM_VERSION_DEFAULT = 1;
private Path outputPath = null;
private Path workPath = null;
private final int algorithmVersion;
/**
* Create a file output committer
* @param outputPath the job's output path, or null if you want the output
* committer to act as a noop.
* @param context the task's context
* @throws IOException
*/
public FileOutputCommitter(Path outputPath,
TaskAttemptContext context) throws IOException {
this(outputPath, (JobContext)context);
if (outputPath != null) {
workPath = getTaskAttemptPath(context, outputPath);
}
}
/**
* Create a file output committer
* @param outputPath the job's output path, or null if you want the output
* committer to act as a noop.
* @param context the task's context
* @throws IOException
*/
@Private
public FileOutputCommitter(Path outputPath,
JobContext context) throws IOException {
Configuration conf = context.getConfiguration();
algorithmVersion =
conf.getInt(FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
FILEOUTPUTCOMMITTER_ALGORITHM_VERSION_DEFAULT);
LOG.info("File Output Committer Algorithm version is " + algorithmVersion);
if (algorithmVersion != 1 && algorithmVersion != 2) {
throw new IOException("Only 1 or 2 algorithm version is supported");
}
if (outputPath != null) {
FileSystem fs = outputPath.getFileSystem(context.getConfiguration());
this.outputPath = fs.makeQualified(outputPath);
}
}
/**
* @return the path where final output of the job should be placed. This
* could also be considered the committed application attempt path.
*/
private Path getOutputPath() {
return this.outputPath;
}
/**
* @return true if we have an output path set, else false.
*/
private boolean hasOutputPath() {
return this.outputPath != null;
}
/**
* @return the path where the output of pending job attempts are
* stored.
*/
private Path getPendingJobAttemptsPath() {
return getPendingJobAttemptsPath(getOutputPath());
}
/**
* Get the location of pending job attempts.
* @param out the base output directory.
* @return the location of pending job attempts.
*/
private static Path getPendingJobAttemptsPath(Path out) {
return new Path(out, PENDING_DIR_NAME);
}
/**
* Get the Application Attempt Id for this job
* @param context the context to look in
* @return the Application Attempt Id for a given job.
*/
private static int getAppAttemptId(JobContext context) {
return context.getConfiguration().getInt(
MRJobConfig.APPLICATION_ATTEMPT_ID, 0);
}
/**
* Compute the path where the output of a given job attempt will be placed.
* @param context the context of the job. This is used to get the
* application attempt id.
* @return the path to store job attempt data.
*/
public Path getJobAttemptPath(JobContext context) {
return getJobAttemptPath(context, getOutputPath());
}
/**
* Compute the path where the output of a given job attempt will be placed.
* @param context the context of the job. This is used to get the
* application attempt id.
* @param out the output path to place these in.
* @return the path to store job attempt data.
*/
public static Path getJobAttemptPath(JobContext context, Path out) {
return getJobAttemptPath(getAppAttemptId(context), out);
}
/**
* Compute the path where the output of a given job attempt will be placed.
* @param appAttemptId the ID of the application attempt for this job.
* @return the path to store job attempt data.
*/
protected Path getJobAttemptPath(int appAttemptId) {
return getJobAttemptPath(appAttemptId, getOutputPath());
}
/**
* Compute the path where the output of a given job attempt will be placed.
* @param appAttemptId the ID of the application attempt for this job.
* @return the path to store job attempt data.
*/
private static Path getJobAttemptPath(int appAttemptId, Path out) {
return new Path(getPendingJobAttemptsPath(out), String.valueOf(appAttemptId));
}
/**
* Compute the path where the output of pending task attempts are stored.
* @param context the context of the job with pending tasks.
* @return the path where the output of pending task attempts are stored.
*/
private Path getPendingTaskAttemptsPath(JobContext context) {
return getPendingTaskAttemptsPath(context, getOutputPath());
}
/**
* Compute the path where the output of pending task attempts are stored.
* @param context the context of the job with pending tasks.
* @return the path where the output of pending task attempts are stored.
*/
private static Path getPendingTaskAttemptsPath(JobContext context, Path out) {
return new Path(getJobAttemptPath(context, out), PENDING_DIR_NAME);
}
/**
* Compute the path where the output of a task attempt is stored until
* that task is committed.
*
* @param context the context of the task attempt.
* @return the path where a task attempt should be stored.
*/
public Path getTaskAttemptPath(TaskAttemptContext context) {
return new Path(getPendingTaskAttemptsPath(context),
String.valueOf(context.getTaskAttemptID()));
}
/**
* Compute the path where the output of a task attempt is stored until
* that task is committed.
*
* @param context the context of the task attempt.
* @param out The output path to put things in.
* @return the path where a task attempt should be stored.
*/
public static Path getTaskAttemptPath(TaskAttemptContext context, Path out) {
return new Path(getPendingTaskAttemptsPath(context, out),
String.valueOf(context.getTaskAttemptID()));
}
/**
* Compute the path where the output of a committed task is stored until
* the entire job is committed.
* @param context the context of the task attempt
* @return the path where the output of a committed task is stored until
* the entire job is committed.
*/
public Path getCommittedTaskPath(TaskAttemptContext context) {
return getCommittedTaskPath(getAppAttemptId(context), context);
}
public static Path getCommittedTaskPath(TaskAttemptContext context, Path out) {
return getCommittedTaskPath(getAppAttemptId(context), context, out);
}
/**
* Compute the path where the output of a committed task is stored until the
* entire job is committed for a specific application attempt.
* @param appAttemptId the id of the application attempt to use
* @param context the context of any task.
* @return the path where the output of a committed task is stored.
*/
protected Path getCommittedTaskPath(int appAttemptId, TaskAttemptContext context) {
return new Path(getJobAttemptPath(appAttemptId),
String.valueOf(context.getTaskAttemptID().getTaskID()));
}
private static Path getCommittedTaskPath(int appAttemptId, TaskAttemptContext context, Path out) {
return new Path(getJobAttemptPath(appAttemptId, out),
String.valueOf(context.getTaskAttemptID().getTaskID()));
}
private static class CommittedTaskFilter implements PathFilter {
@Override
public boolean accept(Path path) {
return !PENDING_DIR_NAME.equals(path.getName());
}
}
/**
* Get a list of all paths where output from committed tasks are stored.
* @param context the context of the current job
* @return the list of these Paths/FileStatuses.
* @throws IOException
*/
private FileStatus[] getAllCommittedTaskPaths(JobContext context)
throws IOException {
Path jobAttemptPath = getJobAttemptPath(context);
FileSystem fs = jobAttemptPath.getFileSystem(context.getConfiguration());
return fs.listStatus(jobAttemptPath, new CommittedTaskFilter());
}
/**
* Get the directory that the task should write results into.
* @return the work directory
* @throws IOException
*/
public Path getWorkPath() throws IOException {
return workPath;
}
/**
* Create the temporary directory that is the root of all of the task
* work directories.
* @param context the job's context
*/
public void setupJob(JobContext context) throws IOException {
if (hasOutputPath()) {
Path jobAttemptPath = getJobAttemptPath(context);
FileSystem fs = jobAttemptPath.getFileSystem(
context.getConfiguration());
if (!fs.mkdirs(jobAttemptPath)) {
LOG.error("Mkdirs failed to create " + jobAttemptPath);
}
} else {
LOG.warn("Output Path is null in setupJob()");
}
}
/**
* The job has completed so move all committed tasks to the final output dir.
* Delete the temporary directory, including all of the work directories.
* Create a _SUCCESS file to make it as successful.
* @param context the job's context
*/
public void commitJob(JobContext context) throws IOException {
if (hasOutputPath()) {
Path finalOutput = getOutputPath();
FileSystem fs = finalOutput.getFileSystem(context.getConfiguration());
if (algorithmVersion == 1) {
for (FileStatus stat: getAllCommittedTaskPaths(context)) {
mergePaths(fs, stat, finalOutput);
}
}
// delete the _temporary folder and create a _done file in the o/p folder
cleanupJob(context);
// True if the job requires output.dir marked on successful job.
// Note that by default it is set to true.
if (context.getConfiguration().getBoolean(SUCCESSFUL_JOB_OUTPUT_DIR_MARKER, true)) {
Path markerPath = new Path(outputPath, SUCCEEDED_FILE_NAME);
fs.create(markerPath).close();
}
} else {
LOG.warn("Output Path is null in commitJob()");
}
}
/**
* Merge two paths together. Anything in from will be moved into to, if there
* are any name conflicts while merging the files or directories in from win.
* @param fs the File System to use
* @param from the path data is coming from.
* @param to the path data is going to.
* @throws IOException on any error
*/
private void mergePaths(FileSystem fs, final FileStatus from,
final Path to) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Merging data from " + from + " to " + to);
}
FileStatus toStat;
try {
toStat = fs.getFileStatus(to);
} catch (FileNotFoundException fnfe) {
toStat = null;
}
if (from.isFile()) {
if (toStat != null) {
if (!fs.delete(to, true)) {
throw new IOException("Failed to delete " + to);
}
}
if (!fs.rename(from.getPath(), to)) {
throw new IOException("Failed to rename " + from + " to " + to);
}
} else if (from.isDirectory()) {
if (toStat != null) {
if (!toStat.isDirectory()) {
if (!fs.delete(to, true)) {
throw new IOException("Failed to delete " + to);
}
renameOrMerge(fs, from, to);
} else {
//It is a directory so merge everything in the directories
for (FileStatus subFrom : fs.listStatus(from.getPath())) {
Path subTo = new Path(to, subFrom.getPath().getName());
mergePaths(fs, subFrom, subTo);
}
}
} else {
renameOrMerge(fs, from, to);
}
}
}
private void renameOrMerge(FileSystem fs, FileStatus from, Path to)
throws IOException {
if (algorithmVersion == 1) {
if (!fs.rename(from.getPath(), to)) {
throw new IOException("Failed to rename " + from + " to " + to);
}
} else {
fs.mkdirs(to);
for (FileStatus subFrom : fs.listStatus(from.getPath())) {
Path subTo = new Path(to, subFrom.getPath().getName());
mergePaths(fs, subFrom, subTo);
}
}
}
@Override
@Deprecated
public void cleanupJob(JobContext context) throws IOException {
if (hasOutputPath()) {
Path pendingJobAttemptsPath = getPendingJobAttemptsPath();
FileSystem fs = pendingJobAttemptsPath
.getFileSystem(context.getConfiguration());
fs.delete(pendingJobAttemptsPath, true);
} else {
LOG.warn("Output Path is null in cleanupJob()");
}
}
/**
* Delete the temporary directory, including all of the work directories.
* @param context the job's context
*/
@Override
public void abortJob(JobContext context, JobStatus.State state)
throws IOException {
// delete the _temporary folder
cleanupJob(context);
}
/**
* No task setup required.
*/
@Override
public void setupTask(TaskAttemptContext context) throws IOException {
// FileOutputCommitter's setupTask doesn't do anything. Because the
// temporary task directory is created on demand when the
// task is writing.
}
/**
* Move the files from the work directory to the job output directory
* @param context the task context
*/
@Override
public void commitTask(TaskAttemptContext context)
throws IOException {
commitTask(context, null);
}
@Private
public void commitTask(TaskAttemptContext context, Path taskAttemptPath)
throws IOException {
TaskAttemptID attemptId = context.getTaskAttemptID();
if (hasOutputPath()) {
context.progress();
if(taskAttemptPath == null) {
taskAttemptPath = getTaskAttemptPath(context);
}
FileSystem fs = taskAttemptPath.getFileSystem(context.getConfiguration());
FileStatus taskAttemptDirStatus;
try {
taskAttemptDirStatus = fs.getFileStatus(taskAttemptPath);
} catch (FileNotFoundException e) {
taskAttemptDirStatus = null;
}
if (taskAttemptDirStatus != null) {
if (algorithmVersion == 1) {
Path committedTaskPath = getCommittedTaskPath(context);
if (fs.exists(committedTaskPath)) {
if (!fs.delete(committedTaskPath, true)) {
throw new IOException("Could not delete " + committedTaskPath);
}
}
if (!fs.rename(taskAttemptPath, committedTaskPath)) {
throw new IOException("Could not rename " + taskAttemptPath + " to "
+ committedTaskPath);
}
LOG.info("Saved output of task '" + attemptId + "' to " +
committedTaskPath);
} else {
// directly merge everything from taskAttemptPath to output directory
mergePaths(fs, taskAttemptDirStatus, outputPath);
LOG.info("Saved output of task '" + attemptId + "' to " +
outputPath);
}
} else {
LOG.warn("No Output found for " + attemptId);
}
} else {
LOG.warn("Output Path is null in commitTask()");
}
}
/**
* Delete the work directory
* @throws IOException
*/
@Override
public void abortTask(TaskAttemptContext context) throws IOException {
abortTask(context, null);
}
@Private
public void abortTask(TaskAttemptContext context, Path taskAttemptPath) throws IOException {
if (hasOutputPath()) {
context.progress();
if(taskAttemptPath == null) {
taskAttemptPath = getTaskAttemptPath(context);
}
FileSystem fs = taskAttemptPath.getFileSystem(context.getConfiguration());
if(!fs.delete(taskAttemptPath, true)) {
LOG.warn("Could not delete "+taskAttemptPath);
}
} else {
LOG.warn("Output Path is null in abortTask()");
}
}
/**
* Did this task write any files in the work directory?
* @param context the task's context
*/
@Override
public boolean needsTaskCommit(TaskAttemptContext context
) throws IOException {
return needsTaskCommit(context, null);
}
@Private
public boolean needsTaskCommit(TaskAttemptContext context, Path taskAttemptPath
) throws IOException {
if(hasOutputPath()) {
if(taskAttemptPath == null) {
taskAttemptPath = getTaskAttemptPath(context);
}
FileSystem fs = taskAttemptPath.getFileSystem(context.getConfiguration());
return fs.exists(taskAttemptPath);
}
return false;
}
@Override
@Deprecated
public boolean isRecoverySupported() {
return true;
}
@Override
public void recoverTask(TaskAttemptContext context)
throws IOException {
if(hasOutputPath()) {
context.progress();
TaskAttemptID attemptId = context.getTaskAttemptID();
int previousAttempt = getAppAttemptId(context) - 1;
if (previousAttempt < 0) {
throw new IOException ("Cannot recover task output for first attempt...");
}
Path previousCommittedTaskPath = getCommittedTaskPath(
previousAttempt, context);
FileSystem fs = previousCommittedTaskPath.getFileSystem(context.getConfiguration());
if (LOG.isDebugEnabled()) {
LOG.debug("Trying to recover task from " + previousCommittedTaskPath);
}
if (algorithmVersion == 1) {
if (fs.exists(previousCommittedTaskPath)) {
Path committedTaskPath = getCommittedTaskPath(context);
if (fs.exists(committedTaskPath)) {
if (!fs.delete(committedTaskPath, true)) {
throw new IOException("Could not delete "+committedTaskPath);
}
}
//Rename can fail if the parent directory does not yet exist.
Path committedParent = committedTaskPath.getParent();
fs.mkdirs(committedParent);
if (!fs.rename(previousCommittedTaskPath, committedTaskPath)) {
throw new IOException("Could not rename " + previousCommittedTaskPath +
" to " + committedTaskPath);
}
} else {
LOG.warn(attemptId+" had no output to recover.");
}
} else {
// essentially a no-op, but for backwards compatibility
// after upgrade to the new fileOutputCommitter,
// check if there are any output left in committedTaskPath
if (fs.exists(previousCommittedTaskPath)) {
LOG.info("Recovering task for upgrading scenario, moving files from "
+ previousCommittedTaskPath + " to " + outputPath);
FileStatus from = fs.getFileStatus(previousCommittedTaskPath);
mergePaths(fs, from, outputPath);
}
LOG.info("Done recovering task " + attemptId);
}
} else {
LOG.warn("Output Path is null in recoverTask()");
}
}
}
| 21,291 | 34.310116 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/SequenceFileOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/** An {@link OutputFormat} that writes {@link SequenceFile}s. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceFileOutputFormat <K,V> extends FileOutputFormat<K, V> {
protected SequenceFile.Writer getSequenceWriter(TaskAttemptContext context,
Class<?> keyClass, Class<?> valueClass)
throws IOException {
Configuration conf = context.getConfiguration();
CompressionCodec codec = null;
CompressionType compressionType = CompressionType.NONE;
if (getCompressOutput(context)) {
// find the kind of compression to do
compressionType = getOutputCompressionType(context);
// find the right codec
Class<?> codecClass = getOutputCompressorClass(context,
DefaultCodec.class);
codec = (CompressionCodec)
ReflectionUtils.newInstance(codecClass, conf);
}
// get the path of the temporary output file
Path file = getDefaultWorkFile(context, "");
FileSystem fs = file.getFileSystem(conf);
return SequenceFile.createWriter(fs, conf, file,
keyClass,
valueClass,
compressionType,
codec,
context);
}
public RecordWriter<K, V>
getRecordWriter(TaskAttemptContext context
) throws IOException, InterruptedException {
final SequenceFile.Writer out = getSequenceWriter(context,
context.getOutputKeyClass(), context.getOutputValueClass());
return new RecordWriter<K, V>() {
public void write(K key, V value)
throws IOException {
out.append(key, value);
}
public void close(TaskAttemptContext context) throws IOException {
out.close();
}
};
}
/**
* Get the {@link CompressionType} for the output {@link SequenceFile}.
* @param job the {@link Job}
* @return the {@link CompressionType} for the output {@link SequenceFile},
* defaulting to {@link CompressionType#RECORD}
*/
public static CompressionType getOutputCompressionType(JobContext job) {
String val = job.getConfiguration().get(FileOutputFormat.COMPRESS_TYPE,
CompressionType.RECORD.toString());
return CompressionType.valueOf(val);
}
/**
* Set the {@link CompressionType} for the output {@link SequenceFile}.
* @param job the {@link Job} to modify
* @param style the {@link CompressionType} for the output
* {@link SequenceFile}
*/
public static void setOutputCompressionType(Job job,
CompressionType style) {
setCompressOutput(job, true);
job.getConfiguration().set(FileOutputFormat.COMPRESS_TYPE,
style.toString());
}
}
| 4,459 | 36.478992 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/SequenceFileAsBinaryOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import java.io.IOException;
import java.io.DataOutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.SequenceFile.ValueBytes;
import org.apache.hadoop.mapred.InvalidJobConfException;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
* An {@link org.apache.hadoop.mapreduce.OutputFormat} that writes keys,
* values to {@link SequenceFile}s in binary(raw) format
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceFileAsBinaryOutputFormat
extends SequenceFileOutputFormat <BytesWritable,BytesWritable> {
public static String KEY_CLASS = "mapreduce.output.seqbinaryoutputformat.key.class";
public static String VALUE_CLASS = "mapreduce.output.seqbinaryoutputformat.value.class";
/**
* Inner class used for appendRaw
*/
static public class WritableValueBytes implements ValueBytes {
private BytesWritable value;
public WritableValueBytes() {
this.value = null;
}
public WritableValueBytes(BytesWritable value) {
this.value = value;
}
public void reset(BytesWritable value) {
this.value = value;
}
public void writeUncompressedBytes(DataOutputStream outStream)
throws IOException {
outStream.write(value.getBytes(), 0, value.getLength());
}
public void writeCompressedBytes(DataOutputStream outStream)
throws IllegalArgumentException, IOException {
throw new UnsupportedOperationException(
"WritableValueBytes doesn't support RECORD compression");
}
public int getSize(){
return value.getLength();
}
}
/**
* Set the key class for the {@link SequenceFile}
* <p>This allows the user to specify the key class to be different
* from the actual class ({@link BytesWritable}) used for writing </p>
*
* @param job the {@link Job} to modify
* @param theClass the SequenceFile output key class.
*/
static public void setSequenceFileOutputKeyClass(Job job,
Class<?> theClass) {
job.getConfiguration().setClass(KEY_CLASS,
theClass, Object.class);
}
/**
* Set the value class for the {@link SequenceFile}
* <p>This allows the user to specify the value class to be different
* from the actual class ({@link BytesWritable}) used for writing </p>
*
* @param job the {@link Job} to modify
* @param theClass the SequenceFile output key class.
*/
static public void setSequenceFileOutputValueClass(Job job,
Class<?> theClass) {
job.getConfiguration().setClass(VALUE_CLASS,
theClass, Object.class);
}
/**
* Get the key class for the {@link SequenceFile}
*
* @return the key class of the {@link SequenceFile}
*/
static public Class<? extends WritableComparable>
getSequenceFileOutputKeyClass(JobContext job) {
return job.getConfiguration().getClass(KEY_CLASS,
job.getOutputKeyClass().asSubclass(WritableComparable.class),
WritableComparable.class);
}
/**
* Get the value class for the {@link SequenceFile}
*
* @return the value class of the {@link SequenceFile}
*/
static public Class<? extends Writable> getSequenceFileOutputValueClass(
JobContext job) {
return job.getConfiguration().getClass(VALUE_CLASS,
job.getOutputValueClass().asSubclass(Writable.class), Writable.class);
}
@Override
public RecordWriter<BytesWritable, BytesWritable> getRecordWriter(
TaskAttemptContext context) throws IOException {
final SequenceFile.Writer out = getSequenceWriter(context,
getSequenceFileOutputKeyClass(context),
getSequenceFileOutputValueClass(context));
return new RecordWriter<BytesWritable, BytesWritable>() {
private WritableValueBytes wvaluebytes = new WritableValueBytes();
public void write(BytesWritable bkey, BytesWritable bvalue)
throws IOException {
wvaluebytes.reset(bvalue);
out.appendRaw(bkey.getBytes(), 0, bkey.getLength(), wvaluebytes);
wvaluebytes.reset(null);
}
public void close(TaskAttemptContext context) throws IOException {
out.close();
}
};
}
@Override
public void checkOutputSpecs(JobContext job) throws IOException {
super.checkOutputSpecs(job);
if (getCompressOutput(job) &&
getOutputCompressionType(job) == CompressionType.RECORD ) {
throw new InvalidJobConfException("SequenceFileAsBinaryOutputFormat "
+ "doesn't support Record Compression" );
}
}
}
| 5,820 | 34.066265 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/reduce/IntSumReducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.reduce;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.Reducer;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class IntSumReducer<Key> extends Reducer<Key,IntWritable,
Key,IntWritable> {
private IntWritable result = new IntWritable();
public void reduce(Key key, Iterable<IntWritable> values,
Context context) throws IOException, InterruptedException {
int sum = 0;
for (IntWritable val : values) {
sum += val.get();
}
result.set(sum);
context.write(key, result);
}
}
| 1,612 | 35.659091 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/reduce/LongSumReducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.reduce;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.Reducer;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class LongSumReducer<KEY> extends Reducer<KEY, LongWritable,
KEY,LongWritable> {
private LongWritable result = new LongWritable();
public void reduce(KEY key, Iterable<LongWritable> values,
Context context) throws IOException, InterruptedException {
long sum = 0;
for (LongWritable val : values) {
sum += val.get();
}
result.set(sum);
context.write(key, result);
}
}
| 1,623 | 35.088889 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/reduce/WrappedReducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.reduce;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.ReduceContext;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.security.Credentials;
/**
* A {@link Reducer} which wraps a given one to allow for custom
* {@link Reducer.Context} implementations.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class WrappedReducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
extends Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
/**
* A a wrapped {@link Reducer.Context} for custom implementations.
* @param reduceContext <code>ReduceContext</code> to be wrapped
* @return a wrapped <code>Reducer.Context</code> for custom implementations
*/
public Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context
getReducerContext(ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> reduceContext) {
return new Context(reduceContext);
}
@InterfaceStability.Evolving
public class Context
extends Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context {
protected ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> reduceContext;
public Context(ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> reduceContext)
{
this.reduceContext = reduceContext;
}
@Override
public KEYIN getCurrentKey() throws IOException, InterruptedException {
return reduceContext.getCurrentKey();
}
@Override
public VALUEIN getCurrentValue() throws IOException, InterruptedException {
return reduceContext.getCurrentValue();
}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
return reduceContext.nextKeyValue();
}
@Override
public Counter getCounter(Enum counterName) {
return reduceContext.getCounter(counterName);
}
@Override
public Counter getCounter(String groupName, String counterName) {
return reduceContext.getCounter(groupName, counterName);
}
@Override
public OutputCommitter getOutputCommitter() {
return reduceContext.getOutputCommitter();
}
@Override
public void write(KEYOUT key, VALUEOUT value) throws IOException,
InterruptedException {
reduceContext.write(key, value);
}
@Override
public String getStatus() {
return reduceContext.getStatus();
}
@Override
public TaskAttemptID getTaskAttemptID() {
return reduceContext.getTaskAttemptID();
}
@Override
public void setStatus(String msg) {
reduceContext.setStatus(msg);
}
@Override
public Path[] getArchiveClassPaths() {
return reduceContext.getArchiveClassPaths();
}
@Override
public String[] getArchiveTimestamps() {
return reduceContext.getArchiveTimestamps();
}
@Override
public URI[] getCacheArchives() throws IOException {
return reduceContext.getCacheArchives();
}
@Override
public URI[] getCacheFiles() throws IOException {
return reduceContext.getCacheFiles();
}
@Override
public Class<? extends Reducer<?, ?, ?, ?>> getCombinerClass()
throws ClassNotFoundException {
return reduceContext.getCombinerClass();
}
@Override
public Configuration getConfiguration() {
return reduceContext.getConfiguration();
}
@Override
public Path[] getFileClassPaths() {
return reduceContext.getFileClassPaths();
}
@Override
public String[] getFileTimestamps() {
return reduceContext.getFileTimestamps();
}
@Override
public RawComparator<?> getCombinerKeyGroupingComparator() {
return reduceContext.getCombinerKeyGroupingComparator();
}
@Override
public RawComparator<?> getGroupingComparator() {
return reduceContext.getGroupingComparator();
}
@Override
public Class<? extends InputFormat<?, ?>> getInputFormatClass()
throws ClassNotFoundException {
return reduceContext.getInputFormatClass();
}
@Override
public String getJar() {
return reduceContext.getJar();
}
@Override
public JobID getJobID() {
return reduceContext.getJobID();
}
@Override
public String getJobName() {
return reduceContext.getJobName();
}
@Override
public boolean getJobSetupCleanupNeeded() {
return reduceContext.getJobSetupCleanupNeeded();
}
@Override
public boolean getTaskCleanupNeeded() {
return reduceContext.getTaskCleanupNeeded();
}
@Override
public Path[] getLocalCacheArchives() throws IOException {
return reduceContext.getLocalCacheArchives();
}
@Override
public Path[] getLocalCacheFiles() throws IOException {
return reduceContext.getLocalCacheFiles();
}
@Override
public Class<?> getMapOutputKeyClass() {
return reduceContext.getMapOutputKeyClass();
}
@Override
public Class<?> getMapOutputValueClass() {
return reduceContext.getMapOutputValueClass();
}
@Override
public Class<? extends Mapper<?, ?, ?, ?>> getMapperClass()
throws ClassNotFoundException {
return reduceContext.getMapperClass();
}
@Override
public int getMaxMapAttempts() {
return reduceContext.getMaxMapAttempts();
}
@Override
public int getMaxReduceAttempts() {
return reduceContext.getMaxReduceAttempts();
}
@Override
public int getNumReduceTasks() {
return reduceContext.getNumReduceTasks();
}
@Override
public Class<? extends OutputFormat<?, ?>> getOutputFormatClass()
throws ClassNotFoundException {
return reduceContext.getOutputFormatClass();
}
@Override
public Class<?> getOutputKeyClass() {
return reduceContext.getOutputKeyClass();
}
@Override
public Class<?> getOutputValueClass() {
return reduceContext.getOutputValueClass();
}
@Override
public Class<? extends Partitioner<?, ?>> getPartitionerClass()
throws ClassNotFoundException {
return reduceContext.getPartitionerClass();
}
@Override
public Class<? extends Reducer<?, ?, ?, ?>> getReducerClass()
throws ClassNotFoundException {
return reduceContext.getReducerClass();
}
@Override
public RawComparator<?> getSortComparator() {
return reduceContext.getSortComparator();
}
@Override
public boolean getSymlink() {
return reduceContext.getSymlink();
}
@Override
public Path getWorkingDirectory() throws IOException {
return reduceContext.getWorkingDirectory();
}
@Override
public void progress() {
reduceContext.progress();
}
@Override
public Iterable<VALUEIN> getValues() throws IOException,
InterruptedException {
return reduceContext.getValues();
}
@Override
public boolean nextKey() throws IOException, InterruptedException {
return reduceContext.nextKey();
}
@Override
public boolean getProfileEnabled() {
return reduceContext.getProfileEnabled();
}
@Override
public String getProfileParams() {
return reduceContext.getProfileParams();
}
@Override
public IntegerRanges getProfileTaskRange(boolean isMap) {
return reduceContext.getProfileTaskRange(isMap);
}
@Override
public String getUser() {
return reduceContext.getUser();
}
@Override
public Credentials getCredentials() {
return reduceContext.getCredentials();
}
@Override
public float getProgress() {
return reduceContext.getProgress();
}
}
}
| 9,147 | 26.22619 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.util.StringUtils;
/**
* A InputFormat that reads input data from an SQL table.
* <p>
* DBInputFormat emits LongWritables containing the record number as
* key and DBWritables as value.
*
* The SQL query, and input class can be using one of the two
* setInput methods.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class DBInputFormat<T extends DBWritable>
extends InputFormat<LongWritable, T> implements Configurable {
private static final Log LOG = LogFactory.getLog(DBInputFormat.class);
protected String dbProductName = "DEFAULT";
/**
* A Class that does nothing, implementing DBWritable
*/
@InterfaceStability.Evolving
public static class NullDBWritable implements DBWritable, Writable {
@Override
public void readFields(DataInput in) throws IOException { }
@Override
public void readFields(ResultSet arg0) throws SQLException { }
@Override
public void write(DataOutput out) throws IOException { }
@Override
public void write(PreparedStatement arg0) throws SQLException { }
}
/**
* A InputSplit that spans a set of rows
*/
@InterfaceStability.Evolving
public static class DBInputSplit extends InputSplit implements Writable {
private long end = 0;
private long start = 0;
/**
* Default Constructor
*/
public DBInputSplit() {
}
/**
* Convenience Constructor
* @param start the index of the first row to select
* @param end the index of the last row to select
*/
public DBInputSplit(long start, long end) {
this.start = start;
this.end = end;
}
/** {@inheritDoc} */
public String[] getLocations() throws IOException {
// TODO Add a layer to enable SQL "sharding" and support locality
return new String[] {};
}
/**
* @return The index of the first row to select
*/
public long getStart() {
return start;
}
/**
* @return The index of the last row to select
*/
public long getEnd() {
return end;
}
/**
* @return The total row count in this split
*/
public long getLength() throws IOException {
return end - start;
}
/** {@inheritDoc} */
public void readFields(DataInput input) throws IOException {
start = input.readLong();
end = input.readLong();
}
/** {@inheritDoc} */
public void write(DataOutput output) throws IOException {
output.writeLong(start);
output.writeLong(end);
}
}
protected String conditions;
protected Connection connection;
protected String tableName;
protected String[] fieldNames;
protected DBConfiguration dbConf;
/** {@inheritDoc} */
public void setConf(Configuration conf) {
dbConf = new DBConfiguration(conf);
try {
this.connection = createConnection();
DatabaseMetaData dbMeta = connection.getMetaData();
this.dbProductName =
StringUtils.toUpperCase(dbMeta.getDatabaseProductName());
}
catch (Exception ex) {
throw new RuntimeException(ex);
}
tableName = dbConf.getInputTableName();
fieldNames = dbConf.getInputFieldNames();
conditions = dbConf.getInputConditions();
}
public Configuration getConf() {
return dbConf.getConf();
}
public DBConfiguration getDBConf() {
return dbConf;
}
public Connection getConnection() {
// TODO Remove this code that handles backward compatibility.
if (this.connection == null) {
this.connection = createConnection();
}
return this.connection;
}
public Connection createConnection() {
try {
Connection newConnection = dbConf.getConnection();
newConnection.setAutoCommit(false);
newConnection.setTransactionIsolation(
Connection.TRANSACTION_SERIALIZABLE);
return newConnection;
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public String getDBProductName() {
return dbProductName;
}
protected RecordReader<LongWritable, T> createDBRecordReader(DBInputSplit split,
Configuration conf) throws IOException {
@SuppressWarnings("unchecked")
Class<T> inputClass = (Class<T>) (dbConf.getInputClass());
try {
// use database product name to determine appropriate record reader.
if (dbProductName.startsWith("ORACLE")) {
// use Oracle-specific db reader.
return new OracleDBRecordReader<T>(split, inputClass,
conf, createConnection(), getDBConf(), conditions, fieldNames,
tableName);
} else if (dbProductName.startsWith("MYSQL")) {
// use MySQL-specific db reader.
return new MySQLDBRecordReader<T>(split, inputClass,
conf, createConnection(), getDBConf(), conditions, fieldNames,
tableName);
} else {
// Generic reader.
return new DBRecordReader<T>(split, inputClass,
conf, createConnection(), getDBConf(), conditions, fieldNames,
tableName);
}
} catch (SQLException ex) {
throw new IOException(ex.getMessage());
}
}
/** {@inheritDoc} */
public RecordReader<LongWritable, T> createRecordReader(InputSplit split,
TaskAttemptContext context) throws IOException, InterruptedException {
return createDBRecordReader((DBInputSplit) split, context.getConfiguration());
}
/** {@inheritDoc} */
public List<InputSplit> getSplits(JobContext job) throws IOException {
ResultSet results = null;
Statement statement = null;
try {
statement = connection.createStatement();
results = statement.executeQuery(getCountQuery());
results.next();
long count = results.getLong(1);
int chunks = job.getConfiguration().getInt(MRJobConfig.NUM_MAPS, 1);
long chunkSize = (count / chunks);
results.close();
statement.close();
List<InputSplit> splits = new ArrayList<InputSplit>();
// Split the rows into n-number of chunks and adjust the last chunk
// accordingly
for (int i = 0; i < chunks; i++) {
DBInputSplit split;
if ((i + 1) == chunks)
split = new DBInputSplit(i * chunkSize, count);
else
split = new DBInputSplit(i * chunkSize, (i * chunkSize)
+ chunkSize);
splits.add(split);
}
connection.commit();
return splits;
} catch (SQLException e) {
throw new IOException("Got SQLException", e);
} finally {
try {
if (results != null) { results.close(); }
} catch (SQLException e1) {}
try {
if (statement != null) { statement.close(); }
} catch (SQLException e1) {}
closeConnection();
}
}
/** Returns the query for getting the total number of rows,
* subclasses can override this for custom behaviour.*/
protected String getCountQuery() {
if(dbConf.getInputCountQuery() != null) {
return dbConf.getInputCountQuery();
}
StringBuilder query = new StringBuilder();
query.append("SELECT COUNT(*) FROM " + tableName);
if (conditions != null && conditions.length() > 0)
query.append(" WHERE " + conditions);
return query.toString();
}
/**
* Initializes the map-part of the job with the appropriate input settings.
*
* @param job The map-reduce job
* @param inputClass the class object implementing DBWritable, which is the
* Java object holding tuple fields.
* @param tableName The table to read data from
* @param conditions The condition which to select data with,
* eg. '(updated > 20070101 AND length > 0)'
* @param orderBy the fieldNames in the orderBy clause.
* @param fieldNames The field names in the table
* @see #setInput(Job, Class, String, String)
*/
public static void setInput(Job job,
Class<? extends DBWritable> inputClass,
String tableName,String conditions,
String orderBy, String... fieldNames) {
job.setInputFormatClass(DBInputFormat.class);
DBConfiguration dbConf = new DBConfiguration(job.getConfiguration());
dbConf.setInputClass(inputClass);
dbConf.setInputTableName(tableName);
dbConf.setInputFieldNames(fieldNames);
dbConf.setInputConditions(conditions);
dbConf.setInputOrderBy(orderBy);
}
/**
* Initializes the map-part of the job with the appropriate input settings.
*
* @param job The map-reduce job
* @param inputClass the class object implementing DBWritable, which is the
* Java object holding tuple fields.
* @param inputQuery the input query to select fields. Example :
* "SELECT f1, f2, f3 FROM Mytable ORDER BY f1"
* @param inputCountQuery the input query that returns
* the number of records in the table.
* Example : "SELECT COUNT(f1) FROM Mytable"
* @see #setInput(Job, Class, String, String, String, String...)
*/
public static void setInput(Job job,
Class<? extends DBWritable> inputClass,
String inputQuery, String inputCountQuery) {
job.setInputFormatClass(DBInputFormat.class);
DBConfiguration dbConf = new DBConfiguration(job.getConfiguration());
dbConf.setInputClass(inputClass);
dbConf.setInputQuery(inputQuery);
dbConf.setInputCountQuery(inputCountQuery);
}
protected void closeConnection() {
try {
if (null != this.connection) {
this.connection.close();
this.connection = null;
}
} catch (SQLException sqlE) {
LOG.debug("Exception on close", sqlE);
}
}
}
| 11,431 | 29.566845 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
/**
* Objects that are read from/written to a database should implement
* <code>DBWritable</code>. DBWritable, is similar to {@link Writable}
* except that the {@link #write(PreparedStatement)} method takes a
* {@link PreparedStatement}, and {@link #readFields(ResultSet)}
* takes a {@link ResultSet}.
* <p>
* Implementations are responsible for writing the fields of the object
* to PreparedStatement, and reading the fields of the object from the
* ResultSet.
*
* <p>Example:</p>
* If we have the following table in the database :
* <pre>
* CREATE TABLE MyTable (
* counter INTEGER NOT NULL,
* timestamp BIGINT NOT NULL,
* );
* </pre>
* then we can read/write the tuples from/to the table with :
* <p><pre>
* public class MyWritable implements Writable, DBWritable {
* // Some data
* private int counter;
* private long timestamp;
*
* //Writable#write() implementation
* public void write(DataOutput out) throws IOException {
* out.writeInt(counter);
* out.writeLong(timestamp);
* }
*
* //Writable#readFields() implementation
* public void readFields(DataInput in) throws IOException {
* counter = in.readInt();
* timestamp = in.readLong();
* }
*
* public void write(PreparedStatement statement) throws SQLException {
* statement.setInt(1, counter);
* statement.setLong(2, timestamp);
* }
*
* public void readFields(ResultSet resultSet) throws SQLException {
* counter = resultSet.getInt(1);
* timestamp = resultSet.getLong(2);
* }
* }
* </pre>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface DBWritable {
/**
* Sets the fields of the object in the {@link PreparedStatement}.
* @param statement the statement that the fields are put into.
* @throws SQLException
*/
public void write(PreparedStatement statement) throws SQLException;
/**
* Reads the fields of the object from the {@link ResultSet}.
* @param resultSet the {@link ResultSet} to get the fields from.
* @throws SQLException
*/
public void readFields(ResultSet resultSet) throws SQLException ;
}
| 3,283 | 33.208333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/FloatSplitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.MRJobConfig;
/**
* Implement DBSplitter over floating-point values.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class FloatSplitter implements DBSplitter {
private static final Log LOG = LogFactory.getLog(FloatSplitter.class);
private static final double MIN_INCREMENT = 10000 * Double.MIN_VALUE;
public List<InputSplit> split(Configuration conf, ResultSet results, String colName)
throws SQLException {
LOG.warn("Generating splits for a floating-point index column. Due to the");
LOG.warn("imprecise representation of floating-point values in Java, this");
LOG.warn("may result in an incomplete import.");
LOG.warn("You are strongly encouraged to choose an integral split column.");
List<InputSplit> splits = new ArrayList<InputSplit>();
if (results.getString(1) == null && results.getString(2) == null) {
// Range is null to null. Return a null split accordingly.
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
colName + " IS NULL", colName + " IS NULL"));
return splits;
}
double minVal = results.getDouble(1);
double maxVal = results.getDouble(2);
// Use this as a hint. May need an extra task if the size doesn't
// divide cleanly.
int numSplits = conf.getInt(MRJobConfig.NUM_MAPS, 1);
double splitSize = (maxVal - minVal) / (double) numSplits;
if (splitSize < MIN_INCREMENT) {
splitSize = MIN_INCREMENT;
}
String lowClausePrefix = colName + " >= ";
String highClausePrefix = colName + " < ";
double curLower = minVal;
double curUpper = curLower + splitSize;
while (curUpper < maxVal) {
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
lowClausePrefix + Double.toString(curLower),
highClausePrefix + Double.toString(curUpper)));
curLower = curUpper;
curUpper += splitSize;
}
// Catch any overage and create the closed interval for the last split.
if (curLower <= maxVal || splits.size() == 1) {
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
lowClausePrefix + Double.toString(curLower),
colName + " <= " + Double.toString(maxVal)));
}
if (results.getString(1) == null || results.getString(2) == null) {
// At least one extrema is null; add a null split.
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
colName + " IS NULL", colName + " IS NULL"));
}
return splits;
}
}
| 3,811 | 34.962264 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
/**
* A RecordReader that reads records from a SQL table.
* Emits LongWritables containing the record number as
* key and DBWritables as value.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class DBRecordReader<T extends DBWritable> extends
RecordReader<LongWritable, T> {
private static final Log LOG = LogFactory.getLog(DBRecordReader.class);
private ResultSet results = null;
private Class<T> inputClass;
private Configuration conf;
private DBInputFormat.DBInputSplit split;
private long pos = 0;
private LongWritable key = null;
private T value = null;
private Connection connection;
protected PreparedStatement statement;
private DBConfiguration dbConf;
private String conditions;
private String [] fieldNames;
private String tableName;
/**
* @param split The InputSplit to read data for
* @throws SQLException
*/
public DBRecordReader(DBInputFormat.DBInputSplit split,
Class<T> inputClass, Configuration conf, Connection conn, DBConfiguration dbConfig,
String cond, String [] fields, String table)
throws SQLException {
this.inputClass = inputClass;
this.split = split;
this.conf = conf;
this.connection = conn;
this.dbConf = dbConfig;
this.conditions = cond;
this.fieldNames = fields;
this.tableName = table;
}
protected ResultSet executeQuery(String query) throws SQLException {
this.statement = connection.prepareStatement(query,
ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
return statement.executeQuery();
}
/** Returns the query for selecting the records,
* subclasses can override this for custom behaviour.*/
protected String getSelectQuery() {
StringBuilder query = new StringBuilder();
// Default codepath for MySQL, HSQLDB, etc. Relies on LIMIT/OFFSET for splits.
if(dbConf.getInputQuery() == null) {
query.append("SELECT ");
for (int i = 0; i < fieldNames.length; i++) {
query.append(fieldNames[i]);
if (i != fieldNames.length -1) {
query.append(", ");
}
}
query.append(" FROM ").append(tableName);
query.append(" AS ").append(tableName); //in hsqldb this is necessary
if (conditions != null && conditions.length() > 0) {
query.append(" WHERE (").append(conditions).append(")");
}
String orderBy = dbConf.getInputOrderBy();
if (orderBy != null && orderBy.length() > 0) {
query.append(" ORDER BY ").append(orderBy);
}
} else {
//PREBUILT QUERY
query.append(dbConf.getInputQuery());
}
try {
query.append(" LIMIT ").append(split.getLength());
query.append(" OFFSET ").append(split.getStart());
} catch (IOException ex) {
// Ignore, will not throw.
}
return query.toString();
}
/** {@inheritDoc} */
public void close() throws IOException {
try {
if (null != results) {
results.close();
}
if (null != statement) {
statement.close();
}
if (null != connection) {
connection.commit();
connection.close();
}
} catch (SQLException e) {
throw new IOException(e.getMessage());
}
}
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
//do nothing
}
/** {@inheritDoc} */
public LongWritable getCurrentKey() {
return key;
}
/** {@inheritDoc} */
public T getCurrentValue() {
return value;
}
/**
* @deprecated
*/
@Deprecated
public T createValue() {
return ReflectionUtils.newInstance(inputClass, conf);
}
/**
* @deprecated
*/
@Deprecated
public long getPos() throws IOException {
return pos;
}
/**
* @deprecated Use {@link #nextKeyValue()}
*/
@Deprecated
public boolean next(LongWritable key, T value) throws IOException {
this.key = key;
this.value = value;
return nextKeyValue();
}
/** {@inheritDoc} */
public float getProgress() throws IOException {
return pos / (float)split.getLength();
}
/** {@inheritDoc} */
public boolean nextKeyValue() throws IOException {
try {
if (key == null) {
key = new LongWritable();
}
if (value == null) {
value = createValue();
}
if (null == this.results) {
// First time into this method, run the query.
this.results = executeQuery(getSelectQuery());
}
if (!results.next())
return false;
// Set the key field value as the output key value
key.set(pos + split.getStart());
value.readFields(results);
pos ++;
} catch (SQLException e) {
throw new IOException("SQLException in nextKeyValue", e);
}
return true;
}
protected DBInputFormat.DBInputSplit getSplit() {
return split;
}
protected String [] getFieldNames() {
return fieldNames;
}
protected String getTableName() {
return tableName;
}
protected String getConditions() {
return conditions;
}
protected DBConfiguration getDBConf() {
return dbConf;
}
protected Connection getConnection() {
return connection;
}
protected PreparedStatement getStatement() {
return statement;
}
protected void setStatement(PreparedStatement stmt) {
this.statement = stmt;
}
}
| 7,283 | 25.391304 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DataDrivenDBRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
/**
* A RecordReader that reads records from a SQL table,
* using data-driven WHERE clause splits.
* Emits LongWritables containing the record number as
* key and DBWritables as value.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class DataDrivenDBRecordReader<T extends DBWritable> extends DBRecordReader<T> {
private static final Log LOG = LogFactory.getLog(DataDrivenDBRecordReader.class);
private String dbProductName; // database manufacturer string.
/**
* @param split The InputSplit to read data for
* @throws SQLException
*/
public DataDrivenDBRecordReader(DBInputFormat.DBInputSplit split,
Class<T> inputClass, Configuration conf, Connection conn, DBConfiguration dbConfig,
String cond, String [] fields, String table, String dbProduct)
throws SQLException {
super(split, inputClass, conf, conn, dbConfig, cond, fields, table);
this.dbProductName = dbProduct;
}
/** Returns the query for selecting the records,
* subclasses can override this for custom behaviour.*/
@SuppressWarnings("unchecked")
protected String getSelectQuery() {
StringBuilder query = new StringBuilder();
DataDrivenDBInputFormat.DataDrivenDBInputSplit dataSplit =
(DataDrivenDBInputFormat.DataDrivenDBInputSplit) getSplit();
DBConfiguration dbConf = getDBConf();
String [] fieldNames = getFieldNames();
String tableName = getTableName();
String conditions = getConditions();
// Build the WHERE clauses associated with the data split first.
// We need them in both branches of this function.
StringBuilder conditionClauses = new StringBuilder();
conditionClauses.append("( ").append(dataSplit.getLowerClause());
conditionClauses.append(" ) AND ( ").append(dataSplit.getUpperClause());
conditionClauses.append(" )");
if(dbConf.getInputQuery() == null) {
// We need to generate the entire query.
query.append("SELECT ");
for (int i = 0; i < fieldNames.length; i++) {
query.append(fieldNames[i]);
if (i != fieldNames.length -1) {
query.append(", ");
}
}
query.append(" FROM ").append(tableName);
if (!dbProductName.startsWith("ORACLE")) {
// Seems to be necessary for hsqldb? Oracle explicitly does *not*
// use this clause.
query.append(" AS ").append(tableName);
}
query.append(" WHERE ");
if (conditions != null && conditions.length() > 0) {
// Put the user's conditions first.
query.append("( ").append(conditions).append(" ) AND ");
}
// Now append the conditions associated with our split.
query.append(conditionClauses.toString());
} else {
// User provided the query. We replace the special token with our WHERE clause.
String inputQuery = dbConf.getInputQuery();
if (inputQuery.indexOf(DataDrivenDBInputFormat.SUBSTITUTE_TOKEN) == -1) {
LOG.error("Could not find the clause substitution token "
+ DataDrivenDBInputFormat.SUBSTITUTE_TOKEN + " in the query: ["
+ inputQuery + "]. Parallel splits may not work correctly.");
}
query.append(inputQuery.replace(DataDrivenDBInputFormat.SUBSTITUTE_TOKEN,
conditionClauses.toString()));
}
LOG.debug("Using query: " + query.toString());
return query.toString();
}
}
| 5,254 | 37.07971 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.StringUtils;
/**
* A OutputFormat that sends the reduce output to a SQL table.
* <p>
* {@link DBOutputFormat} accepts <key,value> pairs, where
* key has a type extending DBWritable. Returned {@link RecordWriter}
* writes <b>only the key</b> to the database with a batch SQL query.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class DBOutputFormat<K extends DBWritable, V>
extends OutputFormat<K,V> {
private static final Log LOG = LogFactory.getLog(DBOutputFormat.class);
public void checkOutputSpecs(JobContext context)
throws IOException, InterruptedException {}
public OutputCommitter getOutputCommitter(TaskAttemptContext context)
throws IOException, InterruptedException {
return new FileOutputCommitter(FileOutputFormat.getOutputPath(context),
context);
}
/**
* A RecordWriter that writes the reduce output to a SQL table
*/
@InterfaceStability.Evolving
public class DBRecordWriter
extends RecordWriter<K, V> {
private Connection connection;
private PreparedStatement statement;
public DBRecordWriter() throws SQLException {
}
public DBRecordWriter(Connection connection
, PreparedStatement statement) throws SQLException {
this.connection = connection;
this.statement = statement;
this.connection.setAutoCommit(false);
}
public Connection getConnection() {
return connection;
}
public PreparedStatement getStatement() {
return statement;
}
/** {@inheritDoc} */
public void close(TaskAttemptContext context) throws IOException {
try {
statement.executeBatch();
connection.commit();
} catch (SQLException e) {
try {
connection.rollback();
}
catch (SQLException ex) {
LOG.warn(StringUtils.stringifyException(ex));
}
throw new IOException(e.getMessage());
} finally {
try {
statement.close();
connection.close();
}
catch (SQLException ex) {
throw new IOException(ex.getMessage());
}
}
}
/** {@inheritDoc} */
public void write(K key, V value) throws IOException {
try {
key.write(statement);
statement.addBatch();
} catch (SQLException e) {
e.printStackTrace();
}
}
}
/**
* Constructs the query used as the prepared statement to insert data.
*
* @param table
* the table to insert into
* @param fieldNames
* the fields to insert into. If field names are unknown, supply an
* array of nulls.
*/
public String constructQuery(String table, String[] fieldNames) {
if(fieldNames == null) {
throw new IllegalArgumentException("Field names may not be null");
}
StringBuilder query = new StringBuilder();
query.append("INSERT INTO ").append(table);
if (fieldNames.length > 0 && fieldNames[0] != null) {
query.append(" (");
for (int i = 0; i < fieldNames.length; i++) {
query.append(fieldNames[i]);
if (i != fieldNames.length - 1) {
query.append(",");
}
}
query.append(")");
}
query.append(" VALUES (");
for (int i = 0; i < fieldNames.length; i++) {
query.append("?");
if(i != fieldNames.length - 1) {
query.append(",");
}
}
query.append(");");
return query.toString();
}
/** {@inheritDoc} */
public RecordWriter<K, V> getRecordWriter(TaskAttemptContext context)
throws IOException {
DBConfiguration dbConf = new DBConfiguration(context.getConfiguration());
String tableName = dbConf.getOutputTableName();
String[] fieldNames = dbConf.getOutputFieldNames();
if(fieldNames == null) {
fieldNames = new String[dbConf.getOutputFieldCount()];
}
try {
Connection connection = dbConf.getConnection();
PreparedStatement statement = null;
statement = connection.prepareStatement(
constructQuery(tableName, fieldNames));
return new DBRecordWriter(connection, statement);
} catch (Exception ex) {
throw new IOException(ex.getMessage());
}
}
/**
* Initializes the reduce-part of the job with
* the appropriate output settings
*
* @param job The job
* @param tableName The table to insert data into
* @param fieldNames The field names in the table.
*/
public static void setOutput(Job job, String tableName,
String... fieldNames) throws IOException {
if(fieldNames.length > 0 && fieldNames[0] != null) {
DBConfiguration dbConf = setOutput(job, tableName);
dbConf.setOutputFieldNames(fieldNames);
} else {
if (fieldNames.length > 0) {
setOutput(job, tableName, fieldNames.length);
}
else {
throw new IllegalArgumentException(
"Field names must be greater than 0");
}
}
}
/**
* Initializes the reduce-part of the job
* with the appropriate output settings
*
* @param job The job
* @param tableName The table to insert data into
* @param fieldCount the number of fields in the table.
*/
public static void setOutput(Job job, String tableName,
int fieldCount) throws IOException {
DBConfiguration dbConf = setOutput(job, tableName);
dbConf.setOutputFieldCount(fieldCount);
}
private static DBConfiguration setOutput(Job job,
String tableName) throws IOException {
job.setOutputFormatClass(DBOutputFormat.class);
job.setReduceSpeculativeExecution(false);
DBConfiguration dbConf = new DBConfiguration(job.getConfiguration());
dbConf.setOutputTableName(tableName);
return dbConf;
}
}
| 7,447 | 30.294118 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/BooleanSplitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputSplit;
/**
* Implement DBSplitter over boolean values.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class BooleanSplitter implements DBSplitter {
public List<InputSplit> split(Configuration conf, ResultSet results, String colName)
throws SQLException {
List<InputSplit> splits = new ArrayList<InputSplit>();
if (results.getString(1) == null && results.getString(2) == null) {
// Range is null to null. Return a null split accordingly.
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
colName + " IS NULL", colName + " IS NULL"));
return splits;
}
boolean minVal = results.getBoolean(1);
boolean maxVal = results.getBoolean(2);
// Use one or two splits.
if (!minVal) {
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
colName + " = FALSE", colName + " = FALSE"));
}
if (maxVal) {
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
colName + " = TRUE", colName + " = TRUE"));
}
if (results.getString(1) == null || results.getString(2) == null) {
// Include a null value.
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
colName + " IS NULL", colName + " IS NULL"));
}
return splits;
}
}
| 2,479 | 33.444444 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/OracleDataDrivenDBInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
/**
* A InputFormat that reads input data from an SQL table in an Oracle db.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class OracleDataDrivenDBInputFormat<T extends DBWritable>
extends DataDrivenDBInputFormat<T> implements Configurable {
/**
* @return the DBSplitter implementation to use to divide the table/query into InputSplits.
*/
@Override
protected DBSplitter getSplitter(int sqlDataType) {
switch (sqlDataType) {
case Types.DATE:
case Types.TIME:
case Types.TIMESTAMP:
return new OracleDateSplitter();
default:
return super.getSplitter(sqlDataType);
}
}
@Override
protected RecordReader<LongWritable, T> createDBRecordReader(DBInputSplit split,
Configuration conf) throws IOException {
DBConfiguration dbConf = getDBConf();
@SuppressWarnings("unchecked")
Class<T> inputClass = (Class<T>) (dbConf.getInputClass());
try {
// Use Oracle-specific db reader
return new OracleDataDrivenDBRecordReader<T>(split, inputClass,
conf, createConnection(), dbConf, dbConf.getInputConditions(),
dbConf.getInputFieldNames(), dbConf.getInputTableName());
} catch (SQLException ex) {
throw new IOException(ex.getMessage());
}
}
}
| 3,210 | 33.159574 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/BigDecimalSplitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.math.BigDecimal;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.MRJobConfig;
/**
* Implement DBSplitter over BigDecimal values.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class BigDecimalSplitter implements DBSplitter {
private static final Log LOG = LogFactory.getLog(BigDecimalSplitter.class);
public List<InputSplit> split(Configuration conf, ResultSet results, String colName)
throws SQLException {
BigDecimal minVal = results.getBigDecimal(1);
BigDecimal maxVal = results.getBigDecimal(2);
String lowClausePrefix = colName + " >= ";
String highClausePrefix = colName + " < ";
BigDecimal numSplits = new BigDecimal(conf.getInt(MRJobConfig.NUM_MAPS, 1));
if (minVal == null && maxVal == null) {
// Range is null to null. Return a null split accordingly.
List<InputSplit> splits = new ArrayList<InputSplit>();
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
colName + " IS NULL", colName + " IS NULL"));
return splits;
}
if (minVal == null || maxVal == null) {
// Don't know what is a reasonable min/max value for interpolation. Fail.
LOG.error("Cannot find a range for NUMERIC or DECIMAL fields with one end NULL.");
return null;
}
// Get all the split points together.
List<BigDecimal> splitPoints = split(numSplits, minVal, maxVal);
List<InputSplit> splits = new ArrayList<InputSplit>();
// Turn the split points into a set of intervals.
BigDecimal start = splitPoints.get(0);
for (int i = 1; i < splitPoints.size(); i++) {
BigDecimal end = splitPoints.get(i);
if (i == splitPoints.size() - 1) {
// This is the last one; use a closed interval.
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
lowClausePrefix + start.toString(),
colName + " <= " + end.toString()));
} else {
// Normal open-interval case.
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
lowClausePrefix + start.toString(),
highClausePrefix + end.toString()));
}
start = end;
}
return splits;
}
private static final BigDecimal MIN_INCREMENT = new BigDecimal(10000 * Double.MIN_VALUE);
/**
* Divide numerator by denominator. If impossible in exact mode, use rounding.
*/
protected BigDecimal tryDivide(BigDecimal numerator, BigDecimal denominator) {
try {
return numerator.divide(denominator);
} catch (ArithmeticException ae) {
return numerator.divide(denominator, BigDecimal.ROUND_HALF_UP);
}
}
/**
* Returns a list of BigDecimals one element longer than the list of input splits.
* This represents the boundaries between input splits.
* All splits are open on the top end, except the last one.
*
* So the list [0, 5, 8, 12, 18] would represent splits capturing the intervals:
*
* [0, 5)
* [5, 8)
* [8, 12)
* [12, 18] note the closed interval for the last split.
*/
List<BigDecimal> split(BigDecimal numSplits, BigDecimal minVal, BigDecimal maxVal)
throws SQLException {
List<BigDecimal> splits = new ArrayList<BigDecimal>();
// Use numSplits as a hint. May need an extra task if the size doesn't
// divide cleanly.
BigDecimal splitSize = tryDivide(maxVal.subtract(minVal), (numSplits));
if (splitSize.compareTo(MIN_INCREMENT) < 0) {
splitSize = MIN_INCREMENT;
LOG.warn("Set BigDecimal splitSize to MIN_INCREMENT");
}
BigDecimal curVal = minVal;
while (curVal.compareTo(maxVal) <= 0) {
splits.add(curVal);
curVal = curVal.add(splitSize);
}
if (splits.get(splits.size() - 1).compareTo(maxVal) != 0 || splits.size() == 1) {
// We didn't end on the maxVal. Add that to the end of the list.
splits.add(maxVal);
}
return splits;
}
}
| 5,157 | 33.386667 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBConfiguration.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.db.DBInputFormat.NullDBWritable;
/**
* A container for configuration property names for jobs with DB input/output.
*
* The job can be configured using the static methods in this class,
* {@link DBInputFormat}, and {@link DBOutputFormat}.
* Alternatively, the properties can be set in the configuration with proper
* values.
*
* @see DBConfiguration#configureDB(Configuration, String, String, String, String)
* @see DBInputFormat#setInput(Job, Class, String, String)
* @see DBInputFormat#setInput(Job, Class, String, String, String, String...)
* @see DBOutputFormat#setOutput(Job, String, String...)
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class DBConfiguration {
/** The JDBC Driver class name */
public static final String DRIVER_CLASS_PROPERTY =
"mapreduce.jdbc.driver.class";
/** JDBC Database access URL */
public static final String URL_PROPERTY = "mapreduce.jdbc.url";
/** User name to access the database */
public static final String USERNAME_PROPERTY = "mapreduce.jdbc.username";
/** Password to access the database */
public static final String PASSWORD_PROPERTY = "mapreduce.jdbc.password";
/** Input table name */
public static final String INPUT_TABLE_NAME_PROPERTY =
"mapreduce.jdbc.input.table.name";
/** Field names in the Input table */
public static final String INPUT_FIELD_NAMES_PROPERTY =
"mapreduce.jdbc.input.field.names";
/** WHERE clause in the input SELECT statement */
public static final String INPUT_CONDITIONS_PROPERTY =
"mapreduce.jdbc.input.conditions";
/** ORDER BY clause in the input SELECT statement */
public static final String INPUT_ORDER_BY_PROPERTY =
"mapreduce.jdbc.input.orderby";
/** Whole input query, exluding LIMIT...OFFSET */
public static final String INPUT_QUERY = "mapreduce.jdbc.input.query";
/** Input query to get the count of records */
public static final String INPUT_COUNT_QUERY =
"mapreduce.jdbc.input.count.query";
/** Input query to get the max and min values of the jdbc.input.query */
public static final String INPUT_BOUNDING_QUERY =
"mapred.jdbc.input.bounding.query";
/** Class name implementing DBWritable which will hold input tuples */
public static final String INPUT_CLASS_PROPERTY =
"mapreduce.jdbc.input.class";
/** Output table name */
public static final String OUTPUT_TABLE_NAME_PROPERTY =
"mapreduce.jdbc.output.table.name";
/** Field names in the Output table */
public static final String OUTPUT_FIELD_NAMES_PROPERTY =
"mapreduce.jdbc.output.field.names";
/** Number of fields in the Output table */
public static final String OUTPUT_FIELD_COUNT_PROPERTY =
"mapreduce.jdbc.output.field.count";
/**
* Sets the DB access related fields in the {@link Configuration}.
* @param conf the configuration
* @param driverClass JDBC Driver class name
* @param dbUrl JDBC DB access URL.
* @param userName DB access username
* @param passwd DB access passwd
*/
public static void configureDB(Configuration conf, String driverClass,
String dbUrl, String userName, String passwd) {
conf.set(DRIVER_CLASS_PROPERTY, driverClass);
conf.set(URL_PROPERTY, dbUrl);
if (userName != null) {
conf.set(USERNAME_PROPERTY, userName);
}
if (passwd != null) {
conf.set(PASSWORD_PROPERTY, passwd);
}
}
/**
* Sets the DB access related fields in the JobConf.
* @param job the job
* @param driverClass JDBC Driver class name
* @param dbUrl JDBC DB access URL.
*/
public static void configureDB(Configuration job, String driverClass,
String dbUrl) {
configureDB(job, driverClass, dbUrl, null, null);
}
private Configuration conf;
public DBConfiguration(Configuration job) {
this.conf = job;
}
/** Returns a connection object o the DB
* @throws ClassNotFoundException
* @throws SQLException */
public Connection getConnection()
throws ClassNotFoundException, SQLException {
Class.forName(conf.get(DBConfiguration.DRIVER_CLASS_PROPERTY));
if(conf.get(DBConfiguration.USERNAME_PROPERTY) == null) {
return DriverManager.getConnection(
conf.get(DBConfiguration.URL_PROPERTY));
} else {
return DriverManager.getConnection(
conf.get(DBConfiguration.URL_PROPERTY),
conf.get(DBConfiguration.USERNAME_PROPERTY),
conf.get(DBConfiguration.PASSWORD_PROPERTY));
}
}
public Configuration getConf() {
return conf;
}
public String getInputTableName() {
return conf.get(DBConfiguration.INPUT_TABLE_NAME_PROPERTY);
}
public void setInputTableName(String tableName) {
conf.set(DBConfiguration.INPUT_TABLE_NAME_PROPERTY, tableName);
}
public String[] getInputFieldNames() {
return conf.getStrings(DBConfiguration.INPUT_FIELD_NAMES_PROPERTY);
}
public void setInputFieldNames(String... fieldNames) {
conf.setStrings(DBConfiguration.INPUT_FIELD_NAMES_PROPERTY, fieldNames);
}
public String getInputConditions() {
return conf.get(DBConfiguration.INPUT_CONDITIONS_PROPERTY);
}
public void setInputConditions(String conditions) {
if (conditions != null && conditions.length() > 0)
conf.set(DBConfiguration.INPUT_CONDITIONS_PROPERTY, conditions);
}
public String getInputOrderBy() {
return conf.get(DBConfiguration.INPUT_ORDER_BY_PROPERTY);
}
public void setInputOrderBy(String orderby) {
if(orderby != null && orderby.length() >0) {
conf.set(DBConfiguration.INPUT_ORDER_BY_PROPERTY, orderby);
}
}
public String getInputQuery() {
return conf.get(DBConfiguration.INPUT_QUERY);
}
public void setInputQuery(String query) {
if(query != null && query.length() >0) {
conf.set(DBConfiguration.INPUT_QUERY, query);
}
}
public String getInputCountQuery() {
return conf.get(DBConfiguration.INPUT_COUNT_QUERY);
}
public void setInputCountQuery(String query) {
if(query != null && query.length() > 0) {
conf.set(DBConfiguration.INPUT_COUNT_QUERY, query);
}
}
public void setInputBoundingQuery(String query) {
if (query != null && query.length() > 0) {
conf.set(DBConfiguration.INPUT_BOUNDING_QUERY, query);
}
}
public String getInputBoundingQuery() {
return conf.get(DBConfiguration.INPUT_BOUNDING_QUERY);
}
public Class<?> getInputClass() {
return conf.getClass(DBConfiguration.INPUT_CLASS_PROPERTY,
NullDBWritable.class);
}
public void setInputClass(Class<? extends DBWritable> inputClass) {
conf.setClass(DBConfiguration.INPUT_CLASS_PROPERTY, inputClass,
DBWritable.class);
}
public String getOutputTableName() {
return conf.get(DBConfiguration.OUTPUT_TABLE_NAME_PROPERTY);
}
public void setOutputTableName(String tableName) {
conf.set(DBConfiguration.OUTPUT_TABLE_NAME_PROPERTY, tableName);
}
public String[] getOutputFieldNames() {
return conf.getStrings(DBConfiguration.OUTPUT_FIELD_NAMES_PROPERTY);
}
public void setOutputFieldNames(String... fieldNames) {
conf.setStrings(DBConfiguration.OUTPUT_FIELD_NAMES_PROPERTY, fieldNames);
}
public void setOutputFieldCount(int fieldCount) {
conf.setInt(DBConfiguration.OUTPUT_FIELD_COUNT_PROPERTY, fieldCount);
}
public int getOutputFieldCount() {
return conf.getInt(OUTPUT_FIELD_COUNT_PROPERTY, 0);
}
}
| 8,677 | 31.62406 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/MySQLDataDrivenDBRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/**
* A RecordReader that reads records from a MySQL table via DataDrivenDBRecordReader
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MySQLDataDrivenDBRecordReader<T extends DBWritable>
extends DataDrivenDBRecordReader<T> {
public MySQLDataDrivenDBRecordReader(DBInputFormat.DBInputSplit split,
Class<T> inputClass, Configuration conf, Connection conn, DBConfiguration dbConfig,
String cond, String [] fields, String table) throws SQLException {
super(split, inputClass, conf, conn, dbConfig, cond, fields, table, "MYSQL");
}
// Execute statements for mysql in unbuffered mode.
protected ResultSet executeQuery(String query) throws SQLException {
statement = getConnection().prepareStatement(query,
ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
statement.setFetchSize(Integer.MIN_VALUE); // MySQL: read row-at-a-time.
return statement.executeQuery();
}
}
| 2,083 | 39.076923 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/IntegerSplitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.MRJobConfig;
/**
* Implement DBSplitter over integer values.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class IntegerSplitter implements DBSplitter {
public List<InputSplit> split(Configuration conf, ResultSet results, String colName)
throws SQLException {
long minVal = results.getLong(1);
long maxVal = results.getLong(2);
String lowClausePrefix = colName + " >= ";
String highClausePrefix = colName + " < ";
int numSplits = conf.getInt(MRJobConfig.NUM_MAPS, 1);
if (numSplits < 1) {
numSplits = 1;
}
if (results.getString(1) == null && results.getString(2) == null) {
// Range is null to null. Return a null split accordingly.
List<InputSplit> splits = new ArrayList<InputSplit>();
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
colName + " IS NULL", colName + " IS NULL"));
return splits;
}
// Get all the split points together.
List<Long> splitPoints = split(numSplits, minVal, maxVal);
List<InputSplit> splits = new ArrayList<InputSplit>();
// Turn the split points into a set of intervals.
long start = splitPoints.get(0);
for (int i = 1; i < splitPoints.size(); i++) {
long end = splitPoints.get(i);
if (i == splitPoints.size() - 1) {
// This is the last one; use a closed interval.
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
lowClausePrefix + Long.toString(start),
colName + " <= " + Long.toString(end)));
} else {
// Normal open-interval case.
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
lowClausePrefix + Long.toString(start),
highClausePrefix + Long.toString(end)));
}
start = end;
}
if (results.getString(1) == null || results.getString(2) == null) {
// At least one extrema is null; add a null split.
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
colName + " IS NULL", colName + " IS NULL"));
}
return splits;
}
/**
* Returns a list of longs one element longer than the list of input splits.
* This represents the boundaries between input splits.
* All splits are open on the top end, except the last one.
*
* So the list [0, 5, 8, 12, 18] would represent splits capturing the intervals:
*
* [0, 5)
* [5, 8)
* [8, 12)
* [12, 18] note the closed interval for the last split.
*/
List<Long> split(long numSplits, long minVal, long maxVal)
throws SQLException {
List<Long> splits = new ArrayList<Long>();
// Use numSplits as a hint. May need an extra task if the size doesn't
// divide cleanly.
long splitSize = (maxVal - minVal) / numSplits;
if (splitSize < 1) {
splitSize = 1;
}
long curVal = minVal;
while (curVal <= maxVal) {
splits.add(curVal);
curVal += splitSize;
}
if (splits.get(splits.size() - 1) != maxVal || splits.size() == 1) {
// We didn't end on the maxVal. Add that to the end of the list.
splits.add(maxVal);
}
return splits;
}
}
| 4,362 | 31.804511 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/OracleDataDrivenDBRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/**
* A RecordReader that reads records from a Oracle table via DataDrivenDBRecordReader
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class OracleDataDrivenDBRecordReader<T extends DBWritable>
extends DataDrivenDBRecordReader<T> {
public OracleDataDrivenDBRecordReader(DBInputFormat.DBInputSplit split,
Class<T> inputClass, Configuration conf, Connection conn,
DBConfiguration dbConfig, String cond, String [] fields,
String table) throws SQLException {
super(split, inputClass, conf, conn, dbConfig, cond, fields, table,
"ORACLE");
// Must initialize the tz used by the connection for Oracle.
OracleDBRecordReader.setSessionTimeZone(conf, conn);
}
}
| 1,861 | 36.24 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/TextSplitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.MRJobConfig;
/**
* Implement DBSplitter over text strings.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class TextSplitter extends BigDecimalSplitter {
private static final Log LOG = LogFactory.getLog(TextSplitter.class);
/**
* This method needs to determine the splits between two user-provided strings.
* In the case where the user's strings are 'A' and 'Z', this is not hard; we
* could create two splits from ['A', 'M') and ['M', 'Z'], 26 splits for strings
* beginning with each letter, etc.
*
* If a user has provided us with the strings "Ham" and "Haze", however, we need
* to create splits that differ in the third letter.
*
* The algorithm used is as follows:
* Since there are 2**16 unicode characters, we interpret characters as digits in
* base 65536. Given a string 's' containing characters s_0, s_1 .. s_n, we interpret
* the string as the number: 0.s_0 s_1 s_2.. s_n in base 65536. Having mapped the
* low and high strings into floating-point values, we then use the BigDecimalSplitter
* to establish the even split points, then map the resulting floating point values
* back into strings.
*/
public List<InputSplit> split(Configuration conf, ResultSet results, String colName)
throws SQLException {
LOG.warn("Generating splits for a textual index column.");
LOG.warn("If your database sorts in a case-insensitive order, "
+ "this may result in a partial import or duplicate records.");
LOG.warn("You are strongly encouraged to choose an integral split column.");
String minString = results.getString(1);
String maxString = results.getString(2);
boolean minIsNull = false;
// If the min value is null, switch it to an empty string instead for purposes
// of interpolation. Then add [null, null] as a special case split.
if (null == minString) {
minString = "";
minIsNull = true;
}
if (null == maxString) {
// If the max string is null, then the min string has to be null too.
// Just return a special split for this case.
List<InputSplit> splits = new ArrayList<InputSplit>();
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
colName + " IS NULL", colName + " IS NULL"));
return splits;
}
// Use this as a hint. May need an extra task if the size doesn't
// divide cleanly.
int numSplits = conf.getInt(MRJobConfig.NUM_MAPS, 1);
String lowClausePrefix = colName + " >= '";
String highClausePrefix = colName + " < '";
// If there is a common prefix between minString and maxString, establish it
// and pull it out of minString and maxString.
int maxPrefixLen = Math.min(minString.length(), maxString.length());
int sharedLen;
for (sharedLen = 0; sharedLen < maxPrefixLen; sharedLen++) {
char c1 = minString.charAt(sharedLen);
char c2 = maxString.charAt(sharedLen);
if (c1 != c2) {
break;
}
}
// The common prefix has length 'sharedLen'. Extract it from both.
String commonPrefix = minString.substring(0, sharedLen);
minString = minString.substring(sharedLen);
maxString = maxString.substring(sharedLen);
List<String> splitStrings = split(numSplits, minString, maxString, commonPrefix);
List<InputSplit> splits = new ArrayList<InputSplit>();
// Convert the list of split point strings into an actual set of InputSplits.
String start = splitStrings.get(0);
for (int i = 1; i < splitStrings.size(); i++) {
String end = splitStrings.get(i);
if (i == splitStrings.size() - 1) {
// This is the last one; use a closed interval.
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
lowClausePrefix + start + "'", colName + " <= '" + end + "'"));
} else {
// Normal open-interval case.
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
lowClausePrefix + start + "'", highClausePrefix + end + "'"));
}
}
if (minIsNull) {
// Add the special null split at the end.
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
colName + " IS NULL", colName + " IS NULL"));
}
return splits;
}
List<String> split(int numSplits, String minString, String maxString, String commonPrefix)
throws SQLException {
BigDecimal minVal = stringToBigDecimal(minString);
BigDecimal maxVal = stringToBigDecimal(maxString);
List<BigDecimal> splitPoints = split(new BigDecimal(numSplits), minVal, maxVal);
List<String> splitStrings = new ArrayList<String>();
// Convert the BigDecimal splitPoints into their string representations.
for (BigDecimal bd : splitPoints) {
splitStrings.add(commonPrefix + bigDecimalToString(bd));
}
// Make sure that our user-specified boundaries are the first and last entries
// in the array.
if (splitStrings.size() == 0 || !splitStrings.get(0).equals(commonPrefix + minString)) {
splitStrings.add(0, commonPrefix + minString);
}
if (splitStrings.size() == 1
|| !splitStrings.get(splitStrings.size() - 1).equals(commonPrefix + maxString)) {
splitStrings.add(commonPrefix + maxString);
}
return splitStrings;
}
private final static BigDecimal ONE_PLACE = new BigDecimal(65536);
// Maximum number of characters to convert. This is to prevent rounding errors
// or repeating fractions near the very bottom from getting out of control. Note
// that this still gives us a huge number of possible splits.
private final static int MAX_CHARS = 8;
/**
* Return a BigDecimal representation of string 'str' suitable for use
* in a numerically-sorting order.
*/
BigDecimal stringToBigDecimal(String str) {
BigDecimal result = BigDecimal.ZERO;
BigDecimal curPlace = ONE_PLACE; // start with 1/65536 to compute the first digit.
int len = Math.min(str.length(), MAX_CHARS);
for (int i = 0; i < len; i++) {
int codePoint = str.codePointAt(i);
result = result.add(tryDivide(new BigDecimal(codePoint), curPlace));
// advance to the next less significant place. e.g., 1/(65536^2) for the second char.
curPlace = curPlace.multiply(ONE_PLACE);
}
return result;
}
/**
* Return the string encoded in a BigDecimal.
* Repeatedly multiply the input value by 65536; the integer portion after such a multiplication
* represents a single character in base 65536. Convert that back into a char and create a
* string out of these until we have no data left.
*/
String bigDecimalToString(BigDecimal bd) {
BigDecimal cur = bd.stripTrailingZeros();
StringBuilder sb = new StringBuilder();
for (int numConverted = 0; numConverted < MAX_CHARS; numConverted++) {
cur = cur.multiply(ONE_PLACE);
int curCodePoint = cur.intValue();
if (0 == curCodePoint) {
break;
}
cur = cur.subtract(new BigDecimal(curCodePoint));
sb.append(Character.toChars(curCodePoint));
}
return sb.toString();
}
}
| 8,457 | 37.099099 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DateSplitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Time;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.MRJobConfig;
/**
* Implement DBSplitter over date/time values.
* Make use of logic from IntegerSplitter, since date/time are just longs
* in Java.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class DateSplitter extends IntegerSplitter {
private static final Log LOG = LogFactory.getLog(DateSplitter.class);
public List<InputSplit> split(Configuration conf, ResultSet results, String colName)
throws SQLException {
long minVal;
long maxVal;
int sqlDataType = results.getMetaData().getColumnType(1);
minVal = resultSetColToLong(results, 1, sqlDataType);
maxVal = resultSetColToLong(results, 2, sqlDataType);
String lowClausePrefix = colName + " >= ";
String highClausePrefix = colName + " < ";
int numSplits = conf.getInt(MRJobConfig.NUM_MAPS, 1);
if (numSplits < 1) {
numSplits = 1;
}
if (minVal == Long.MIN_VALUE && maxVal == Long.MIN_VALUE) {
// The range of acceptable dates is NULL to NULL. Just create a single split.
List<InputSplit> splits = new ArrayList<InputSplit>();
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
colName + " IS NULL", colName + " IS NULL"));
return splits;
}
// Gather the split point integers
List<Long> splitPoints = split(numSplits, minVal, maxVal);
List<InputSplit> splits = new ArrayList<InputSplit>();
// Turn the split points into a set of intervals.
long start = splitPoints.get(0);
Date startDate = longToDate(start, sqlDataType);
if (sqlDataType == Types.TIMESTAMP) {
// The lower bound's nanos value needs to match the actual lower-bound nanos.
try {
((java.sql.Timestamp) startDate).setNanos(results.getTimestamp(1).getNanos());
} catch (NullPointerException npe) {
// If the lower bound was NULL, we'll get an NPE; just ignore it and don't set nanos.
}
}
for (int i = 1; i < splitPoints.size(); i++) {
long end = splitPoints.get(i);
Date endDate = longToDate(end, sqlDataType);
if (i == splitPoints.size() - 1) {
if (sqlDataType == Types.TIMESTAMP) {
// The upper bound's nanos value needs to match the actual upper-bound nanos.
try {
((java.sql.Timestamp) endDate).setNanos(results.getTimestamp(2).getNanos());
} catch (NullPointerException npe) {
// If the upper bound was NULL, we'll get an NPE; just ignore it and don't set nanos.
}
}
// This is the last one; use a closed interval.
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
lowClausePrefix + dateToString(startDate),
colName + " <= " + dateToString(endDate)));
} else {
// Normal open-interval case.
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
lowClausePrefix + dateToString(startDate),
highClausePrefix + dateToString(endDate)));
}
start = end;
startDate = endDate;
}
if (minVal == Long.MIN_VALUE || maxVal == Long.MIN_VALUE) {
// Add an extra split to handle the null case that we saw.
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
colName + " IS NULL", colName + " IS NULL"));
}
return splits;
}
/** Retrieve the value from the column in a type-appropriate manner and return
its timestamp since the epoch. If the column is null, then return Long.MIN_VALUE.
This will cause a special split to be generated for the NULL case, but may also
cause poorly-balanced splits if most of the actual dates are positive time
since the epoch, etc.
*/
private long resultSetColToLong(ResultSet rs, int colNum, int sqlDataType) throws SQLException {
try {
switch (sqlDataType) {
case Types.DATE:
return rs.getDate(colNum).getTime();
case Types.TIME:
return rs.getTime(colNum).getTime();
case Types.TIMESTAMP:
return rs.getTimestamp(colNum).getTime();
default:
throw new SQLException("Not a date-type field");
}
} catch (NullPointerException npe) {
// null column. return minimum long value.
LOG.warn("Encountered a NULL date in the split column. Splits may be poorly balanced.");
return Long.MIN_VALUE;
}
}
/** Parse the long-valued timestamp into the appropriate SQL date type. */
private Date longToDate(long val, int sqlDataType) {
switch (sqlDataType) {
case Types.DATE:
return new java.sql.Date(val);
case Types.TIME:
return new java.sql.Time(val);
case Types.TIMESTAMP:
return new java.sql.Timestamp(val);
default: // Shouldn't ever hit this case.
return null;
}
}
/**
* Given a Date 'd', format it as a string for use in a SQL date
* comparison operation.
* @param d the date to format.
* @return the string representing this date in SQL with any appropriate
* quotation characters, etc.
*/
protected String dateToString(Date d) {
return "'" + d.toString() + "'";
}
}
| 6,493 | 35.27933 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBSplitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputSplit;
/**
* DBSplitter will generate DBInputSplits to use with DataDrivenDBInputFormat.
* DataDrivenDBInputFormat needs to interpolate between two values that
* represent the lowest and highest valued records to import. Depending
* on the data-type of the column, this requires different behavior.
* DBSplitter implementations should perform this for a data type or family
* of data types.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface DBSplitter {
/**
* Given a ResultSet containing one record (and already advanced to that record)
* with two columns (a low value, and a high value, both of the same type), determine
* a set of splits that span the given values.
*/
List<InputSplit> split(Configuration conf, ResultSet results, String colName) throws SQLException;
}
| 1,960 | 39.854167 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/OracleDBRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.io.IOException;
import java.sql.Connection;
import java.sql.SQLException;
import java.lang.reflect.Method;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* A RecordReader that reads records from an Oracle SQL table.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class OracleDBRecordReader<T extends DBWritable> extends DBRecordReader<T> {
/** Configuration key to set to a timezone string. */
public static final String SESSION_TIMEZONE_KEY = "oracle.sessionTimeZone";
private static final Log LOG = LogFactory.getLog(OracleDBRecordReader.class);
public OracleDBRecordReader(DBInputFormat.DBInputSplit split,
Class<T> inputClass, Configuration conf, Connection conn, DBConfiguration dbConfig,
String cond, String [] fields, String table) throws SQLException {
super(split, inputClass, conf, conn, dbConfig, cond, fields, table);
setSessionTimeZone(conf, conn);
}
/** Returns the query for selecting the records from an Oracle DB. */
protected String getSelectQuery() {
StringBuilder query = new StringBuilder();
DBConfiguration dbConf = getDBConf();
String conditions = getConditions();
String tableName = getTableName();
String [] fieldNames = getFieldNames();
// Oracle-specific codepath to use rownum instead of LIMIT/OFFSET.
if(dbConf.getInputQuery() == null) {
query.append("SELECT ");
for (int i = 0; i < fieldNames.length; i++) {
query.append(fieldNames[i]);
if (i != fieldNames.length -1) {
query.append(", ");
}
}
query.append(" FROM ").append(tableName);
if (conditions != null && conditions.length() > 0)
query.append(" WHERE ").append(conditions);
String orderBy = dbConf.getInputOrderBy();
if (orderBy != null && orderBy.length() > 0) {
query.append(" ORDER BY ").append(orderBy);
}
} else {
//PREBUILT QUERY
query.append(dbConf.getInputQuery());
}
try {
DBInputFormat.DBInputSplit split = getSplit();
if (split.getLength() > 0){
String querystring = query.toString();
query = new StringBuilder();
query.append("SELECT * FROM (SELECT a.*,ROWNUM dbif_rno FROM ( ");
query.append(querystring);
query.append(" ) a WHERE rownum <= ").append(split.getEnd());
query.append(" ) WHERE dbif_rno > ").append(split.getStart());
}
} catch (IOException ex) {
// ignore, will not throw.
}
return query.toString();
}
/**
* Set session time zone
* @param conf The current configuration.
* We read the 'oracle.sessionTimeZone' property from here.
* @param conn The connection to alter the timezone properties of.
*/
public static void setSessionTimeZone(Configuration conf,
Connection conn) throws SQLException {
// need to use reflection to call the method setSessionTimeZone on
// the OracleConnection class because oracle specific java libraries are
// not accessible in this context.
Method method;
try {
method = conn.getClass().getMethod(
"setSessionTimeZone", new Class [] {String.class});
} catch (Exception ex) {
LOG.error("Could not find method setSessionTimeZone in " + conn.getClass().getName(), ex);
// rethrow SQLException
throw new SQLException(ex);
}
// Need to set the time zone in order for Java
// to correctly access the column "TIMESTAMP WITH LOCAL TIME ZONE".
// We can't easily get the correct Oracle-specific timezone string
// from Java; just let the user set the timezone in a property.
String clientTimeZone = conf.get(SESSION_TIMEZONE_KEY, "GMT");
try {
method.setAccessible(true);
method.invoke(conn, clientTimeZone);
LOG.info("Time zone has been set to " + clientTimeZone);
} catch (Exception ex) {
LOG.warn("Time zone " + clientTimeZone +
" could not be set on Oracle database.");
LOG.warn("Setting default time zone: GMT");
try {
// "GMT" timezone is guaranteed to exist.
method.invoke(conn, "GMT");
} catch (Exception ex2) {
LOG.error("Could not set time zone for oracle connection", ex2);
// rethrow SQLException
throw new SQLException(ex);
}
}
}
}
| 5,404 | 36.275862 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/MySQLDBRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/**
* A RecordReader that reads records from a MySQL table.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MySQLDBRecordReader<T extends DBWritable> extends DBRecordReader<T> {
public MySQLDBRecordReader(DBInputFormat.DBInputSplit split,
Class<T> inputClass, Configuration conf, Connection conn, DBConfiguration dbConfig,
String cond, String [] fields, String table) throws SQLException {
super(split, inputClass, conf, conn, dbConfig, cond, fields, table);
}
// Execute statements for mysql in unbuffered mode.
protected ResultSet executeQuery(String query) throws SQLException {
statement = getConnection().prepareStatement(query,
ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
statement.setFetchSize(Integer.MIN_VALUE); // MySQL: read row-at-a-time.
return statement.executeQuery();
}
}
| 2,013 | 38.490196 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/OracleDateSplitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.util.Date;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Implement DBSplitter over date/time values returned by an Oracle db.
* Make use of logic from DateSplitter, since this just needs to use
* some Oracle-specific functions on the formatting end when generating
* InputSplits.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class OracleDateSplitter extends DateSplitter {
@SuppressWarnings("unchecked")
@Override
protected String dateToString(Date d) {
// Oracle Data objects are always actually Timestamps
return "TO_TIMESTAMP('" + d.toString() + "', 'YYYY-MM-DD HH24:MI:SS.FF')";
}
}
| 1,575 | 35.651163 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DataDrivenDBInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
/**
* A InputFormat that reads input data from an SQL table.
* Operates like DBInputFormat, but instead of using LIMIT and OFFSET to demarcate
* splits, it tries to generate WHERE clauses which separate the data into roughly
* equivalent shards.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class DataDrivenDBInputFormat<T extends DBWritable>
extends DBInputFormat<T> implements Configurable {
private static final Log LOG = LogFactory.getLog(DataDrivenDBInputFormat.class);
/** If users are providing their own query, the following string is expected to
appear in the WHERE clause, which will be substituted with a pair of conditions
on the input to allow input splits to parallelise the import. */
public static final String SUBSTITUTE_TOKEN = "$CONDITIONS";
/**
* A InputSplit that spans a set of rows
*/
@InterfaceStability.Evolving
public static class DataDrivenDBInputSplit extends DBInputFormat.DBInputSplit {
private String lowerBoundClause;
private String upperBoundClause;
/**
* Default Constructor
*/
public DataDrivenDBInputSplit() {
}
/**
* Convenience Constructor
* @param lower the string to be put in the WHERE clause to guard on the 'lower' end
* @param upper the string to be put in the WHERE clause to guard on the 'upper' end
*/
public DataDrivenDBInputSplit(final String lower, final String upper) {
this.lowerBoundClause = lower;
this.upperBoundClause = upper;
}
/**
* @return The total row count in this split
*/
public long getLength() throws IOException {
return 0; // unfortunately, we don't know this.
}
/** {@inheritDoc} */
public void readFields(DataInput input) throws IOException {
this.lowerBoundClause = Text.readString(input);
this.upperBoundClause = Text.readString(input);
}
/** {@inheritDoc} */
public void write(DataOutput output) throws IOException {
Text.writeString(output, this.lowerBoundClause);
Text.writeString(output, this.upperBoundClause);
}
public String getLowerClause() {
return lowerBoundClause;
}
public String getUpperClause() {
return upperBoundClause;
}
}
/**
* @return the DBSplitter implementation to use to divide the table/query into InputSplits.
*/
protected DBSplitter getSplitter(int sqlDataType) {
switch (sqlDataType) {
case Types.NUMERIC:
case Types.DECIMAL:
return new BigDecimalSplitter();
case Types.BIT:
case Types.BOOLEAN:
return new BooleanSplitter();
case Types.INTEGER:
case Types.TINYINT:
case Types.SMALLINT:
case Types.BIGINT:
return new IntegerSplitter();
case Types.REAL:
case Types.FLOAT:
case Types.DOUBLE:
return new FloatSplitter();
case Types.CHAR:
case Types.VARCHAR:
case Types.LONGVARCHAR:
return new TextSplitter();
case Types.DATE:
case Types.TIME:
case Types.TIMESTAMP:
return new DateSplitter();
default:
// TODO: Support BINARY, VARBINARY, LONGVARBINARY, DISTINCT, CLOB, BLOB, ARRAY
// STRUCT, REF, DATALINK, and JAVA_OBJECT.
return null;
}
}
/** {@inheritDoc} */
public List<InputSplit> getSplits(JobContext job) throws IOException {
int targetNumTasks = job.getConfiguration().getInt(MRJobConfig.NUM_MAPS, 1);
if (1 == targetNumTasks) {
// There's no need to run a bounding vals query; just return a split
// that separates nothing. This can be considerably more optimal for a
// large table with no index.
List<InputSplit> singletonSplit = new ArrayList<InputSplit>();
singletonSplit.add(new DataDrivenDBInputSplit("1=1", "1=1"));
return singletonSplit;
}
ResultSet results = null;
Statement statement = null;
try {
statement = connection.createStatement();
results = statement.executeQuery(getBoundingValsQuery());
results.next();
// Based on the type of the results, use a different mechanism
// for interpolating split points (i.e., numeric splits, text splits,
// dates, etc.)
int sqlDataType = results.getMetaData().getColumnType(1);
DBSplitter splitter = getSplitter(sqlDataType);
if (null == splitter) {
throw new IOException("Unknown SQL data type: " + sqlDataType);
}
return splitter.split(job.getConfiguration(), results, getDBConf().getInputOrderBy());
} catch (SQLException e) {
throw new IOException(e.getMessage());
} finally {
// More-or-less ignore SQL exceptions here, but log in case we need it.
try {
if (null != results) {
results.close();
}
} catch (SQLException se) {
LOG.debug("SQLException closing resultset: " + se.toString());
}
try {
if (null != statement) {
statement.close();
}
} catch (SQLException se) {
LOG.debug("SQLException closing statement: " + se.toString());
}
try {
connection.commit();
closeConnection();
} catch (SQLException se) {
LOG.debug("SQLException committing split transaction: " + se.toString());
}
}
}
/**
* @return a query which returns the minimum and maximum values for
* the order-by column.
*
* The min value should be in the first column, and the
* max value should be in the second column of the results.
*/
protected String getBoundingValsQuery() {
// If the user has provided a query, use that instead.
String userQuery = getDBConf().getInputBoundingQuery();
if (null != userQuery) {
return userQuery;
}
// Auto-generate one based on the table name we've been provided with.
StringBuilder query = new StringBuilder();
String splitCol = getDBConf().getInputOrderBy();
query.append("SELECT MIN(").append(splitCol).append("), ");
query.append("MAX(").append(splitCol).append(") FROM ");
query.append(getDBConf().getInputTableName());
String conditions = getDBConf().getInputConditions();
if (null != conditions) {
query.append(" WHERE ( " + conditions + " )");
}
return query.toString();
}
/** Set the user-defined bounding query to use with a user-defined query.
This *must* include the substring "$CONDITIONS"
(DataDrivenDBInputFormat.SUBSTITUTE_TOKEN) inside the WHERE clause,
so that DataDrivenDBInputFormat knows where to insert split clauses.
e.g., "SELECT foo FROM mytable WHERE $CONDITIONS"
This will be expanded to something like:
SELECT foo FROM mytable WHERE (id > 100) AND (id < 250)
inside each split.
*/
public static void setBoundingQuery(Configuration conf, String query) {
if (null != query) {
// If the user's settng a query, warn if they don't allow conditions.
if (query.indexOf(SUBSTITUTE_TOKEN) == -1) {
LOG.warn("Could not find " + SUBSTITUTE_TOKEN + " token in query: " + query
+ "; splits may not partition data.");
}
}
conf.set(DBConfiguration.INPUT_BOUNDING_QUERY, query);
}
protected RecordReader<LongWritable, T> createDBRecordReader(DBInputSplit split,
Configuration conf) throws IOException {
DBConfiguration dbConf = getDBConf();
@SuppressWarnings("unchecked")
Class<T> inputClass = (Class<T>) (dbConf.getInputClass());
String dbProductName = getDBProductName();
LOG.debug("Creating db record reader for db product: " + dbProductName);
try {
// use database product name to determine appropriate record reader.
if (dbProductName.startsWith("MYSQL")) {
// use MySQL-specific db reader.
return new MySQLDataDrivenDBRecordReader<T>(split, inputClass,
conf, createConnection(), dbConf, dbConf.getInputConditions(),
dbConf.getInputFieldNames(), dbConf.getInputTableName());
} else {
// Generic reader.
return new DataDrivenDBRecordReader<T>(split, inputClass,
conf, createConnection(), dbConf, dbConf.getInputConditions(),
dbConf.getInputFieldNames(), dbConf.getInputTableName(),
dbProductName);
}
} catch (SQLException ex) {
throw new IOException(ex.getMessage());
}
}
// Configuration methods override superclass to ensure that the proper
// DataDrivenDBInputFormat gets used.
/** Note that the "orderBy" column is called the "splitBy" in this version.
* We reuse the same field, but it's not strictly ordering it -- just partitioning
* the results.
*/
public static void setInput(Job job,
Class<? extends DBWritable> inputClass,
String tableName,String conditions,
String splitBy, String... fieldNames) {
DBInputFormat.setInput(job, inputClass, tableName, conditions, splitBy, fieldNames);
job.setInputFormatClass(DataDrivenDBInputFormat.class);
}
/** setInput() takes a custom query and a separate "bounding query" to use
instead of the custom "count query" used by DBInputFormat.
*/
public static void setInput(Job job,
Class<? extends DBWritable> inputClass,
String inputQuery, String inputBoundingQuery) {
DBInputFormat.setInput(job, inputClass, inputQuery, "");
job.getConfiguration().set(DBConfiguration.INPUT_BOUNDING_QUERY, inputBoundingQuery);
job.setInputFormatClass(DataDrivenDBInputFormat.class);
}
}
| 11,467 | 33.646526 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainReducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.chain;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.chain.Chain.ChainBlockingQueue;
import java.io.IOException;
/**
* The ChainReducer class allows to chain multiple Mapper classes after a
* Reducer within the Reducer task.
*
* <p>
* For each record output by the Reducer, the Mapper classes are invoked in a
* chained (or piped) fashion. The output of the reducer becomes the input of
* the first mapper and output of first becomes the input of the second, and so
* on until the last Mapper, the output of the last Mapper will be written to
* the task's output.
* </p>
* <p>
* The key functionality of this feature is that the Mappers in the chain do not
* need to be aware that they are executed after the Reducer or in a chain. This
* enables having reusable specialized Mappers that can be combined to perform
* composite operations within a single task.
* </p>
* <p>
* Special care has to be taken when creating chains that the key/values output
* by a Mapper are valid for the following Mapper in the chain. It is assumed
* all Mappers and the Reduce in the chain use matching output and input key and
* value classes as no conversion is done by the chaining code.
* </p>
* <p> Using the ChainMapper and the ChainReducer classes is possible to
* compose Map/Reduce jobs that look like <code>[MAP+ / REDUCE MAP*]</code>. And
* immediate benefit of this pattern is a dramatic reduction in disk IO. </p>
* <p>
* IMPORTANT: There is no need to specify the output key/value classes for the
* ChainReducer, this is done by the setReducer or the addMapper for the last
* element in the chain.
* </p>
* ChainReducer usage pattern:
* <p>
*
* <pre>
* ...
* Job = new Job(conf);
* ....
*
* Configuration reduceConf = new Configuration(false);
* ...
* ChainReducer.setReducer(job, XReduce.class, LongWritable.class, Text.class,
* Text.class, Text.class, true, reduceConf);
*
* ChainReducer.addMapper(job, CMap.class, Text.class, Text.class,
* LongWritable.class, Text.class, false, null);
*
* ChainReducer.addMapper(job, DMap.class, LongWritable.class, Text.class,
* LongWritable.class, LongWritable.class, true, null);
*
* ...
*
* job.waitForCompletion(true);
* ...
* </pre>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ChainReducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT> extends
Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
/**
* Sets the {@link Reducer} class to the chain job.
*
* <p>
* The key and values are passed from one element of the chain to the next, by
* value. For the added Reducer the configuration given for it,
* <code>reducerConf</code>, have precedence over the job's Configuration.
* This precedence is in effect when the task is running.
* </p>
* <p>
* IMPORTANT: There is no need to specify the output key/value classes for the
* ChainReducer, this is done by the setReducer or the addMapper for the last
* element in the chain.
* </p>
*
* @param job
* the job
* @param klass
* the Reducer class to add.
* @param inputKeyClass
* reducer input key class.
* @param inputValueClass
* reducer input value class.
* @param outputKeyClass
* reducer output key class.
* @param outputValueClass
* reducer output value class.
* @param reducerConf
* a configuration for the Reducer class. It is recommended to use a
* Configuration without default values using the
* <code>Configuration(boolean loadDefaults)</code> constructor with
* FALSE.
*/
public static void setReducer(Job job, Class<? extends Reducer> klass,
Class<?> inputKeyClass, Class<?> inputValueClass,
Class<?> outputKeyClass, Class<?> outputValueClass,
Configuration reducerConf) {
job.setReducerClass(ChainReducer.class);
job.setOutputKeyClass(outputKeyClass);
job.setOutputValueClass(outputValueClass);
Chain.setReducer(job, klass, inputKeyClass, inputValueClass,
outputKeyClass, outputValueClass, reducerConf);
}
/**
* Adds a {@link Mapper} class to the chain reducer.
*
* <p>
* The key and values are passed from one element of the chain to the next, by
* value For the added Mapper the configuration given for it,
* <code>mapperConf</code>, have precedence over the job's Configuration. This
* precedence is in effect when the task is running.
* </p>
* <p>
* IMPORTANT: There is no need to specify the output key/value classes for the
* ChainMapper, this is done by the addMapper for the last mapper in the
* chain.
* </p>
*
* @param job
* The job.
* @param klass
* the Mapper class to add.
* @param inputKeyClass
* mapper input key class.
* @param inputValueClass
* mapper input value class.
* @param outputKeyClass
* mapper output key class.
* @param outputValueClass
* mapper output value class.
* @param mapperConf
* a configuration for the Mapper class. It is recommended to use a
* Configuration without default values using the
* <code>Configuration(boolean loadDefaults)</code> constructor with
* FALSE.
*/
public static void addMapper(Job job, Class<? extends Mapper> klass,
Class<?> inputKeyClass, Class<?> inputValueClass,
Class<?> outputKeyClass, Class<?> outputValueClass,
Configuration mapperConf) throws IOException {
job.setOutputKeyClass(outputKeyClass);
job.setOutputValueClass(outputValueClass);
Chain.addMapper(false, job, klass, inputKeyClass, inputValueClass,
outputKeyClass, outputValueClass, mapperConf);
}
private Chain chain;
protected void setup(Context context) {
chain = new Chain(false);
chain.setup(context.getConfiguration());
}
public void run(Context context) throws IOException, InterruptedException {
setup(context);
// if no reducer is set, just do nothing
if (chain.getReducer() == null) {
return;
}
int numMappers = chain.getAllMappers().size();
// if there are no mappers in chain, run the reducer
if (numMappers == 0) {
chain.runReducer(context);
return;
}
// add reducer and all mappers with proper context
ChainBlockingQueue<Chain.KeyValuePair<?, ?>> inputqueue;
ChainBlockingQueue<Chain.KeyValuePair<?, ?>> outputqueue;
// add reducer
outputqueue = chain.createBlockingQueue();
chain.addReducer(context, outputqueue);
// add all mappers except last one
for (int i = 0; i < numMappers - 1; i++) {
inputqueue = outputqueue;
outputqueue = chain.createBlockingQueue();
chain.addMapper(inputqueue, outputqueue, context, i);
}
// add last mapper
chain.addMapper(outputqueue, context, numMappers - 1);
// start all threads
chain.startAllThreads();
// wait for all threads
chain.joinAllThreads();
}
}
| 8,161 | 35.932127 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.chain;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DefaultStringifier;
import org.apache.hadoop.io.Stringifier;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.ReduceContext;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.apache.hadoop.mapreduce.lib.map.WrappedMapper;
import org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer;
import org.apache.hadoop.util.ReflectionUtils;
/**
* The Chain class provides all the common functionality for the
* {@link ChainMapper} and the {@link ChainReducer} classes.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class Chain {
protected static final String CHAIN_MAPPER = "mapreduce.chain.mapper";
protected static final String CHAIN_REDUCER = "mapreduce.chain.reducer";
protected static final String CHAIN_MAPPER_SIZE = ".size";
protected static final String CHAIN_MAPPER_CLASS = ".mapper.class.";
protected static final String CHAIN_MAPPER_CONFIG = ".mapper.config.";
protected static final String CHAIN_REDUCER_CLASS = ".reducer.class";
protected static final String CHAIN_REDUCER_CONFIG = ".reducer.config";
protected static final String MAPPER_INPUT_KEY_CLASS =
"mapreduce.chain.mapper.input.key.class";
protected static final String MAPPER_INPUT_VALUE_CLASS =
"mapreduce.chain.mapper.input.value.class";
protected static final String MAPPER_OUTPUT_KEY_CLASS =
"mapreduce.chain.mapper.output.key.class";
protected static final String MAPPER_OUTPUT_VALUE_CLASS =
"mapreduce.chain.mapper.output.value.class";
protected static final String REDUCER_INPUT_KEY_CLASS =
"mapreduce.chain.reducer.input.key.class";
protected static final String REDUCER_INPUT_VALUE_CLASS =
"mapreduce.chain.reducer.input.value.class";
protected static final String REDUCER_OUTPUT_KEY_CLASS =
"mapreduce.chain.reducer.output.key.class";
protected static final String REDUCER_OUTPUT_VALUE_CLASS =
"mapreduce.chain.reducer.output.value.class";
protected boolean isMap;
@SuppressWarnings("unchecked")
private List<Mapper> mappers = new ArrayList<Mapper>();
private Reducer<?, ?, ?, ?> reducer;
private List<Configuration> confList = new ArrayList<Configuration>();
private Configuration rConf;
private List<Thread> threads = new ArrayList<Thread>();
private List<ChainBlockingQueue<?>> blockingQueues =
new ArrayList<ChainBlockingQueue<?>>();
private Throwable throwable = null;
/**
* Creates a Chain instance configured for a Mapper or a Reducer.
*
* @param isMap
* TRUE indicates the chain is for a Mapper, FALSE that is for a
* Reducer.
*/
protected Chain(boolean isMap) {
this.isMap = isMap;
}
static class KeyValuePair<K, V> {
K key;
V value;
boolean endOfInput;
KeyValuePair(K key, V value) {
this.key = key;
this.value = value;
this.endOfInput = false;
}
KeyValuePair(boolean eof) {
this.key = null;
this.value = null;
this.endOfInput = eof;
}
}
// ChainRecordReader either reads from blocking queue or task context.
private static class ChainRecordReader<KEYIN, VALUEIN> extends
RecordReader<KEYIN, VALUEIN> {
private Class<?> keyClass;
private Class<?> valueClass;
private KEYIN key;
private VALUEIN value;
private Configuration conf;
TaskInputOutputContext<KEYIN, VALUEIN, ?, ?> inputContext = null;
ChainBlockingQueue<KeyValuePair<KEYIN, VALUEIN>> inputQueue = null;
// constructor to read from a blocking queue
ChainRecordReader(Class<?> keyClass, Class<?> valueClass,
ChainBlockingQueue<KeyValuePair<KEYIN, VALUEIN>> inputQueue,
Configuration conf) {
this.keyClass = keyClass;
this.valueClass = valueClass;
this.inputQueue = inputQueue;
this.conf = conf;
}
// constructor to read from the context
ChainRecordReader(TaskInputOutputContext<KEYIN, VALUEIN, ?, ?> context) {
inputContext = context;
}
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
}
/**
* Advance to the next key, value pair, returning null if at end.
*
* @return the key object that was read into, or null if no more
*/
public boolean nextKeyValue() throws IOException, InterruptedException {
if (inputQueue != null) {
return readFromQueue();
} else if (inputContext.nextKeyValue()) {
this.key = inputContext.getCurrentKey();
this.value = inputContext.getCurrentValue();
return true;
} else {
return false;
}
}
@SuppressWarnings("unchecked")
private boolean readFromQueue() throws IOException, InterruptedException {
KeyValuePair<KEYIN, VALUEIN> kv = null;
// wait for input on queue
kv = inputQueue.dequeue();
if (kv.endOfInput) {
return false;
}
key = (KEYIN) ReflectionUtils.newInstance(keyClass, conf);
value = (VALUEIN) ReflectionUtils.newInstance(valueClass, conf);
ReflectionUtils.copy(conf, kv.key, this.key);
ReflectionUtils.copy(conf, kv.value, this.value);
return true;
}
/**
* Get the current key.
*
* @return the current key object or null if there isn't one
* @throws IOException
* @throws InterruptedException
*/
public KEYIN getCurrentKey() throws IOException, InterruptedException {
return this.key;
}
/**
* Get the current value.
*
* @return the value object that was read into
* @throws IOException
* @throws InterruptedException
*/
public VALUEIN getCurrentValue() throws IOException, InterruptedException {
return this.value;
}
@Override
public void close() throws IOException {
}
@Override
public float getProgress() throws IOException, InterruptedException {
return 0;
}
}
// ChainRecordWriter either writes to blocking queue or task context
private static class ChainRecordWriter<KEYOUT, VALUEOUT> extends
RecordWriter<KEYOUT, VALUEOUT> {
TaskInputOutputContext<?, ?, KEYOUT, VALUEOUT> outputContext = null;
ChainBlockingQueue<KeyValuePair<KEYOUT, VALUEOUT>> outputQueue = null;
KEYOUT keyout;
VALUEOUT valueout;
Configuration conf;
Class<?> keyClass;
Class<?> valueClass;
// constructor to write to context
ChainRecordWriter(TaskInputOutputContext<?, ?, KEYOUT, VALUEOUT> context) {
outputContext = context;
}
// constructor to write to blocking queue
ChainRecordWriter(Class<?> keyClass, Class<?> valueClass,
ChainBlockingQueue<KeyValuePair<KEYOUT, VALUEOUT>> output,
Configuration conf) {
this.keyClass = keyClass;
this.valueClass = valueClass;
this.outputQueue = output;
this.conf = conf;
}
/**
* Writes a key/value pair.
*
* @param key
* the key to write.
* @param value
* the value to write.
* @throws IOException
*/
public void write(KEYOUT key, VALUEOUT value) throws IOException,
InterruptedException {
if (outputQueue != null) {
writeToQueue(key, value);
} else {
outputContext.write(key, value);
}
}
@SuppressWarnings("unchecked")
private void writeToQueue(KEYOUT key, VALUEOUT value) throws IOException,
InterruptedException {
this.keyout = (KEYOUT) ReflectionUtils.newInstance(keyClass, conf);
this.valueout = (VALUEOUT) ReflectionUtils.newInstance(valueClass, conf);
ReflectionUtils.copy(conf, key, this.keyout);
ReflectionUtils.copy(conf, value, this.valueout);
// wait to write output to queuue
outputQueue.enqueue(new KeyValuePair<KEYOUT, VALUEOUT>(keyout, valueout));
}
/**
* Close this <code>RecordWriter</code> to future operations.
*
* @param context
* the context of the task
* @throws IOException
*/
public void close(TaskAttemptContext context) throws IOException,
InterruptedException {
if (outputQueue != null) {
// write end of input
outputQueue.enqueue(new KeyValuePair<KEYOUT, VALUEOUT>(true));
}
}
}
private synchronized Throwable getThrowable() {
return throwable;
}
private synchronized boolean setIfUnsetThrowable(Throwable th) {
if (throwable == null) {
throwable = th;
return true;
}
return false;
}
private class MapRunner<KEYIN, VALUEIN, KEYOUT, VALUEOUT> extends Thread {
private Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> mapper;
private Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context chainContext;
private RecordReader<KEYIN, VALUEIN> rr;
private RecordWriter<KEYOUT, VALUEOUT> rw;
public MapRunner(Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> mapper,
Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context mapperContext,
RecordReader<KEYIN, VALUEIN> rr, RecordWriter<KEYOUT, VALUEOUT> rw)
throws IOException, InterruptedException {
this.mapper = mapper;
this.rr = rr;
this.rw = rw;
this.chainContext = mapperContext;
}
@Override
public void run() {
if (getThrowable() != null) {
return;
}
try {
mapper.run(chainContext);
rr.close();
rw.close(chainContext);
} catch (Throwable th) {
if (setIfUnsetThrowable(th)) {
interruptAllThreads();
}
}
}
}
private class ReduceRunner<KEYIN, VALUEIN, KEYOUT, VALUEOUT> extends Thread {
private Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT> reducer;
private Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context chainContext;
private RecordWriter<KEYOUT, VALUEOUT> rw;
ReduceRunner(Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context context,
Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT> reducer,
RecordWriter<KEYOUT, VALUEOUT> rw) throws IOException,
InterruptedException {
this.reducer = reducer;
this.chainContext = context;
this.rw = rw;
}
@Override
public void run() {
try {
reducer.run(chainContext);
rw.close(chainContext);
} catch (Throwable th) {
if (setIfUnsetThrowable(th)) {
interruptAllThreads();
}
}
}
}
Configuration getConf(int index) {
return confList.get(index);
}
/**
* Create a map context that is based on ChainMapContext and the given record
* reader and record writer
*/
private <KEYIN, VALUEIN, KEYOUT, VALUEOUT>
Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context createMapContext(
RecordReader<KEYIN, VALUEIN> rr, RecordWriter<KEYOUT, VALUEOUT> rw,
TaskInputOutputContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> context,
Configuration conf) {
MapContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> mapContext =
new ChainMapContextImpl<KEYIN, VALUEIN, KEYOUT, VALUEOUT>(
context, rr, rw, conf);
Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context mapperContext =
new WrappedMapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>()
.getMapContext(mapContext);
return mapperContext;
}
@SuppressWarnings("unchecked")
void runMapper(TaskInputOutputContext context, int index) throws IOException,
InterruptedException {
Mapper mapper = mappers.get(index);
RecordReader rr = new ChainRecordReader(context);
RecordWriter rw = new ChainRecordWriter(context);
Mapper.Context mapperContext = createMapContext(rr, rw, context,
getConf(index));
mapper.run(mapperContext);
rr.close();
rw.close(context);
}
/**
* Add mapper(the first mapper) that reads input from the input
* context and writes to queue
*/
@SuppressWarnings("unchecked")
void addMapper(TaskInputOutputContext inputContext,
ChainBlockingQueue<KeyValuePair<?, ?>> output, int index)
throws IOException, InterruptedException {
Configuration conf = getConf(index);
Class<?> keyOutClass = conf.getClass(MAPPER_OUTPUT_KEY_CLASS, Object.class);
Class<?> valueOutClass = conf.getClass(MAPPER_OUTPUT_VALUE_CLASS,
Object.class);
RecordReader rr = new ChainRecordReader(inputContext);
RecordWriter rw = new ChainRecordWriter(keyOutClass, valueOutClass, output,
conf);
Mapper.Context mapperContext = createMapContext(rr, rw,
(MapContext) inputContext, getConf(index));
MapRunner runner = new MapRunner(mappers.get(index), mapperContext, rr, rw);
threads.add(runner);
}
/**
* Add mapper(the last mapper) that reads input from
* queue and writes output to the output context
*/
@SuppressWarnings("unchecked")
void addMapper(ChainBlockingQueue<KeyValuePair<?, ?>> input,
TaskInputOutputContext outputContext, int index) throws IOException,
InterruptedException {
Configuration conf = getConf(index);
Class<?> keyClass = conf.getClass(MAPPER_INPUT_KEY_CLASS, Object.class);
Class<?> valueClass = conf.getClass(MAPPER_INPUT_VALUE_CLASS, Object.class);
RecordReader rr = new ChainRecordReader(keyClass, valueClass, input, conf);
RecordWriter rw = new ChainRecordWriter(outputContext);
MapRunner runner = new MapRunner(mappers.get(index), createMapContext(rr,
rw, outputContext, getConf(index)), rr, rw);
threads.add(runner);
}
/**
* Add mapper that reads and writes from/to the queue
*/
@SuppressWarnings("unchecked")
void addMapper(ChainBlockingQueue<KeyValuePair<?, ?>> input,
ChainBlockingQueue<KeyValuePair<?, ?>> output,
TaskInputOutputContext context, int index) throws IOException,
InterruptedException {
Configuration conf = getConf(index);
Class<?> keyClass = conf.getClass(MAPPER_INPUT_KEY_CLASS, Object.class);
Class<?> valueClass = conf.getClass(MAPPER_INPUT_VALUE_CLASS, Object.class);
Class<?> keyOutClass = conf.getClass(MAPPER_OUTPUT_KEY_CLASS, Object.class);
Class<?> valueOutClass = conf.getClass(MAPPER_OUTPUT_VALUE_CLASS,
Object.class);
RecordReader rr = new ChainRecordReader(keyClass, valueClass, input, conf);
RecordWriter rw = new ChainRecordWriter(keyOutClass, valueOutClass, output,
conf);
MapRunner runner = new MapRunner(mappers.get(index), createMapContext(rr,
rw, context, getConf(index)), rr, rw);
threads.add(runner);
}
/**
* Create a reduce context that is based on ChainMapContext and the given
* record writer
*/
private <KEYIN, VALUEIN, KEYOUT, VALUEOUT>
Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context createReduceContext(
RecordWriter<KEYOUT, VALUEOUT> rw,
ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> context,
Configuration conf) {
ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> reduceContext =
new ChainReduceContextImpl<KEYIN, VALUEIN, KEYOUT, VALUEOUT>(
context, rw, conf);
Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context reducerContext =
new WrappedReducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>()
.getReducerContext(reduceContext);
return reducerContext;
}
// Run the reducer directly.
@SuppressWarnings("unchecked")
<KEYIN, VALUEIN, KEYOUT, VALUEOUT> void runReducer(
TaskInputOutputContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> context)
throws IOException, InterruptedException {
RecordWriter<KEYOUT, VALUEOUT> rw = new ChainRecordWriter<KEYOUT, VALUEOUT>(
context);
Reducer.Context reducerContext = createReduceContext(rw,
(ReduceContext) context, rConf);
reducer.run(reducerContext);
rw.close(context);
}
/**
* Add reducer that reads from context and writes to a queue
*/
@SuppressWarnings("unchecked")
void addReducer(TaskInputOutputContext inputContext,
ChainBlockingQueue<KeyValuePair<?, ?>> outputQueue) throws IOException,
InterruptedException {
Class<?> keyOutClass = rConf.getClass(REDUCER_OUTPUT_KEY_CLASS,
Object.class);
Class<?> valueOutClass = rConf.getClass(REDUCER_OUTPUT_VALUE_CLASS,
Object.class);
RecordWriter rw = new ChainRecordWriter(keyOutClass, valueOutClass,
outputQueue, rConf);
Reducer.Context reducerContext = createReduceContext(rw,
(ReduceContext) inputContext, rConf);
ReduceRunner runner = new ReduceRunner(reducerContext, reducer, rw);
threads.add(runner);
}
// start all the threads
void startAllThreads() {
for (Thread thread : threads) {
thread.start();
}
}
// wait till all threads finish
void joinAllThreads() throws IOException, InterruptedException {
for (Thread thread : threads) {
thread.join();
}
Throwable th = getThrowable();
if (th != null) {
if (th instanceof IOException) {
throw (IOException) th;
} else if (th instanceof InterruptedException) {
throw (InterruptedException) th;
} else {
throw new RuntimeException(th);
}
}
}
// interrupt all threads
private synchronized void interruptAllThreads() {
for (Thread th : threads) {
th.interrupt();
}
for (ChainBlockingQueue<?> queue : blockingQueues) {
queue.interrupt();
}
}
/**
* Returns the prefix to use for the configuration of the chain depending if
* it is for a Mapper or a Reducer.
*
* @param isMap
* TRUE for Mapper, FALSE for Reducer.
* @return the prefix to use.
*/
protected static String getPrefix(boolean isMap) {
return (isMap) ? CHAIN_MAPPER : CHAIN_REDUCER;
}
protected static int getIndex(Configuration conf, String prefix) {
return conf.getInt(prefix + CHAIN_MAPPER_SIZE, 0);
}
/**
* Creates a {@link Configuration} for the Map or Reduce in the chain.
*
* <p>
* It creates a new Configuration using the chain job's Configuration as base
* and adds to it the configuration properties for the chain element. The keys
* of the chain element Configuration have precedence over the given
* Configuration.
* </p>
*
* @param jobConf
* the chain job's Configuration.
* @param confKey
* the key for chain element configuration serialized in the chain
* job's Configuration.
* @return a new Configuration aggregating the chain job's Configuration with
* the chain element configuration properties.
*/
protected static Configuration getChainElementConf(Configuration jobConf,
String confKey) {
Configuration conf = null;
try (Stringifier<Configuration> stringifier =
new DefaultStringifier<Configuration>(jobConf, Configuration.class);) {
String confString = jobConf.get(confKey, null);
if (confString != null) {
conf = stringifier.fromString(jobConf.get(confKey, null));
}
} catch (IOException ioex) {
throw new RuntimeException(ioex);
}
// we have to do this because the Writable desearialization clears all
// values set in the conf making not possible do a
// new Configuration(jobConf) in the creation of the conf above
jobConf = new Configuration(jobConf);
if (conf != null) {
for (Map.Entry<String, String> entry : conf) {
jobConf.set(entry.getKey(), entry.getValue());
}
}
return jobConf;
}
/**
* Adds a Mapper class to the chain job.
*
* <p>
* The configuration properties of the chain job have precedence over the
* configuration properties of the Mapper.
*
* @param isMap
* indicates if the Chain is for a Mapper or for a Reducer.
* @param job
* chain job.
* @param klass
* the Mapper class to add.
* @param inputKeyClass
* mapper input key class.
* @param inputValueClass
* mapper input value class.
* @param outputKeyClass
* mapper output key class.
* @param outputValueClass
* mapper output value class.
* @param mapperConf
* a configuration for the Mapper class. It is recommended to use a
* Configuration without default values using the
* <code>Configuration(boolean loadDefaults)</code> constructor with
* FALSE.
*/
@SuppressWarnings("unchecked")
protected static void addMapper(boolean isMap, Job job,
Class<? extends Mapper> klass, Class<?> inputKeyClass,
Class<?> inputValueClass, Class<?> outputKeyClass,
Class<?> outputValueClass, Configuration mapperConf) {
String prefix = getPrefix(isMap);
Configuration jobConf = job.getConfiguration();
// if a reducer chain check the Reducer has been already set
checkReducerAlreadySet(isMap, jobConf, prefix, true);
// set the mapper class
int index = getIndex(jobConf, prefix);
jobConf.setClass(prefix + CHAIN_MAPPER_CLASS + index, klass, Mapper.class);
validateKeyValueTypes(isMap, jobConf, inputKeyClass, inputValueClass,
outputKeyClass, outputValueClass, index, prefix);
setMapperConf(isMap, jobConf, inputKeyClass, inputValueClass,
outputKeyClass, outputValueClass, mapperConf, index, prefix);
}
// if a reducer chain check the Reducer has been already set or not
protected static void checkReducerAlreadySet(boolean isMap,
Configuration jobConf, String prefix, boolean shouldSet) {
if (!isMap) {
if (shouldSet) {
if (jobConf.getClass(prefix + CHAIN_REDUCER_CLASS, null) == null) {
throw new IllegalStateException(
"A Mapper can be added to the chain only after the Reducer has "
+ "been set");
}
} else {
if (jobConf.getClass(prefix + CHAIN_REDUCER_CLASS, null) != null) {
throw new IllegalStateException("Reducer has been already set");
}
}
}
}
protected static void validateKeyValueTypes(boolean isMap,
Configuration jobConf, Class<?> inputKeyClass, Class<?> inputValueClass,
Class<?> outputKeyClass, Class<?> outputValueClass, int index,
String prefix) {
// if it is a reducer chain and the first Mapper is being added check the
// key and value input classes of the mapper match those of the reducer
// output.
if (!isMap && index == 0) {
Configuration reducerConf = getChainElementConf(jobConf, prefix
+ CHAIN_REDUCER_CONFIG);
if (!inputKeyClass.isAssignableFrom(reducerConf.getClass(
REDUCER_OUTPUT_KEY_CLASS, null))) {
throw new IllegalArgumentException("The Reducer output key class does"
+ " not match the Mapper input key class");
}
if (!inputValueClass.isAssignableFrom(reducerConf.getClass(
REDUCER_OUTPUT_VALUE_CLASS, null))) {
throw new IllegalArgumentException("The Reducer output value class"
+ " does not match the Mapper input value class");
}
} else if (index > 0) {
// check the that the new Mapper in the chain key and value input classes
// match those of the previous Mapper output.
Configuration previousMapperConf = getChainElementConf(jobConf, prefix
+ CHAIN_MAPPER_CONFIG + (index - 1));
if (!inputKeyClass.isAssignableFrom(previousMapperConf.getClass(
MAPPER_OUTPUT_KEY_CLASS, null))) {
throw new IllegalArgumentException("The specified Mapper input key class does"
+ " not match the previous Mapper's output key class.");
}
if (!inputValueClass.isAssignableFrom(previousMapperConf.getClass(
MAPPER_OUTPUT_VALUE_CLASS, null))) {
throw new IllegalArgumentException("The specified Mapper input value class"
+ " does not match the previous Mapper's output value class.");
}
}
}
protected static void setMapperConf(boolean isMap, Configuration jobConf,
Class<?> inputKeyClass, Class<?> inputValueClass,
Class<?> outputKeyClass, Class<?> outputValueClass,
Configuration mapperConf, int index, String prefix) {
// if the Mapper does not have a configuration, create an empty one
if (mapperConf == null) {
// using a Configuration without defaults to make it lightweight.
// still the chain's conf may have all defaults and this conf is
// overlapped to the chain configuration one.
mapperConf = new Configuration(true);
}
// store the input/output classes of the mapper in the mapper conf
mapperConf.setClass(MAPPER_INPUT_KEY_CLASS, inputKeyClass, Object.class);
mapperConf
.setClass(MAPPER_INPUT_VALUE_CLASS, inputValueClass, Object.class);
mapperConf.setClass(MAPPER_OUTPUT_KEY_CLASS, outputKeyClass, Object.class);
mapperConf.setClass(MAPPER_OUTPUT_VALUE_CLASS, outputValueClass,
Object.class);
// serialize the mapper configuration in the chain configuration.
Stringifier<Configuration> stringifier =
new DefaultStringifier<Configuration>(jobConf, Configuration.class);
try {
jobConf.set(prefix + CHAIN_MAPPER_CONFIG + index, stringifier
.toString(new Configuration(mapperConf)));
} catch (IOException ioEx) {
throw new RuntimeException(ioEx);
}
// increment the chain counter
jobConf.setInt(prefix + CHAIN_MAPPER_SIZE, index + 1);
}
/**
* Sets the Reducer class to the chain job.
*
* <p>
* The configuration properties of the chain job have precedence over the
* configuration properties of the Reducer.
*
* @param job
* the chain job.
* @param klass
* the Reducer class to add.
* @param inputKeyClass
* reducer input key class.
* @param inputValueClass
* reducer input value class.
* @param outputKeyClass
* reducer output key class.
* @param outputValueClass
* reducer output value class.
* @param reducerConf
* a configuration for the Reducer class. It is recommended to use a
* Configuration without default values using the
* <code>Configuration(boolean loadDefaults)</code> constructor with
* FALSE.
*/
@SuppressWarnings("unchecked")
protected static void setReducer(Job job, Class<? extends Reducer> klass,
Class<?> inputKeyClass, Class<?> inputValueClass,
Class<?> outputKeyClass, Class<?> outputValueClass,
Configuration reducerConf) {
String prefix = getPrefix(false);
Configuration jobConf = job.getConfiguration();
checkReducerAlreadySet(false, jobConf, prefix, false);
jobConf.setClass(prefix + CHAIN_REDUCER_CLASS, klass, Reducer.class);
setReducerConf(jobConf, inputKeyClass, inputValueClass, outputKeyClass,
outputValueClass, reducerConf, prefix);
}
protected static void setReducerConf(Configuration jobConf,
Class<?> inputKeyClass, Class<?> inputValueClass,
Class<?> outputKeyClass, Class<?> outputValueClass,
Configuration reducerConf, String prefix) {
// if the Reducer does not have a Configuration, create an empty one
if (reducerConf == null) {
// using a Configuration without defaults to make it lightweight.
// still the chain's conf may have all defaults and this conf is
// overlapped to the chain's Configuration one.
reducerConf = new Configuration(false);
}
// store the input/output classes of the reducer in
// the reducer configuration
reducerConf.setClass(REDUCER_INPUT_KEY_CLASS, inputKeyClass, Object.class);
reducerConf.setClass(REDUCER_INPUT_VALUE_CLASS, inputValueClass,
Object.class);
reducerConf
.setClass(REDUCER_OUTPUT_KEY_CLASS, outputKeyClass, Object.class);
reducerConf.setClass(REDUCER_OUTPUT_VALUE_CLASS, outputValueClass,
Object.class);
// serialize the reducer configuration in the chain's configuration.
Stringifier<Configuration> stringifier =
new DefaultStringifier<Configuration>(jobConf, Configuration.class);
try {
jobConf.set(prefix + CHAIN_REDUCER_CONFIG, stringifier
.toString(new Configuration(reducerConf)));
} catch (IOException ioEx) {
throw new RuntimeException(ioEx);
}
}
/**
* Setup the chain.
*
* @param jobConf
* chain job's {@link Configuration}.
*/
@SuppressWarnings("unchecked")
void setup(Configuration jobConf) {
String prefix = getPrefix(isMap);
int index = jobConf.getInt(prefix + CHAIN_MAPPER_SIZE, 0);
for (int i = 0; i < index; i++) {
Class<? extends Mapper> klass = jobConf.getClass(prefix
+ CHAIN_MAPPER_CLASS + i, null, Mapper.class);
Configuration mConf = getChainElementConf(jobConf, prefix
+ CHAIN_MAPPER_CONFIG + i);
confList.add(mConf);
Mapper mapper = ReflectionUtils.newInstance(klass, mConf);
mappers.add(mapper);
}
Class<? extends Reducer> klass = jobConf.getClass(prefix
+ CHAIN_REDUCER_CLASS, null, Reducer.class);
if (klass != null) {
rConf = getChainElementConf(jobConf, prefix + CHAIN_REDUCER_CONFIG);
reducer = ReflectionUtils.newInstance(klass, rConf);
}
}
@SuppressWarnings("unchecked")
List<Mapper> getAllMappers() {
return mappers;
}
/**
* Returns the Reducer instance in the chain.
*
* @return the Reducer instance in the chain or NULL if none.
*/
Reducer<?, ?, ?, ?> getReducer() {
return reducer;
}
/**
* Creates a ChainBlockingQueue with KeyValuePair as element
*
* @return the ChainBlockingQueue
*/
ChainBlockingQueue<KeyValuePair<?, ?>> createBlockingQueue() {
return new ChainBlockingQueue<KeyValuePair<?, ?>>();
}
/**
* A blocking queue with one element.
*
* @param <E>
*/
class ChainBlockingQueue<E> {
E element = null;
boolean isInterrupted = false;
ChainBlockingQueue() {
blockingQueues.add(this);
}
synchronized void enqueue(E e) throws InterruptedException {
while (element != null) {
if (isInterrupted) {
throw new InterruptedException();
}
this.wait();
}
element = e;
this.notify();
}
synchronized E dequeue() throws InterruptedException {
while (element == null) {
if (isInterrupted) {
throw new InterruptedException();
}
this.wait();
}
E e = element;
element = null;
this.notify();
return e;
}
synchronized void interrupt() {
isInterrupted = true;
this.notifyAll();
}
}
}
| 31,976 | 34.255788 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.chain;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.chain.Chain.ChainBlockingQueue;
/**
* The ChainMapper class allows to use multiple Mapper classes within a single
* Map task.
*
* <p>
* The Mapper classes are invoked in a chained (or piped) fashion, the output of
* the first becomes the input of the second, and so on until the last Mapper,
* the output of the last Mapper will be written to the task's output.
* </p>
* <p>
* The key functionality of this feature is that the Mappers in the chain do not
* need to be aware that they are executed in a chain. This enables having
* reusable specialized Mappers that can be combined to perform composite
* operations within a single task.
* </p>
* <p>
* Special care has to be taken when creating chains that the key/values output
* by a Mapper are valid for the following Mapper in the chain. It is assumed
* all Mappers and the Reduce in the chain use matching output and input key and
* value classes as no conversion is done by the chaining code.
* </p>
* <p>
* Using the ChainMapper and the ChainReducer classes is possible to compose
* Map/Reduce jobs that look like <code>[MAP+ / REDUCE MAP*]</code>. And
* immediate benefit of this pattern is a dramatic reduction in disk IO.
* </p>
* <p>
* IMPORTANT: There is no need to specify the output key/value classes for the
* ChainMapper, this is done by the addMapper for the last mapper in the chain.
* </p>
* ChainMapper usage pattern:
* <p>
*
* <pre>
* ...
* Job = new Job(conf);
*
* Configuration mapAConf = new Configuration(false);
* ...
* ChainMapper.addMapper(job, AMap.class, LongWritable.class, Text.class,
* Text.class, Text.class, true, mapAConf);
*
* Configuration mapBConf = new Configuration(false);
* ...
* ChainMapper.addMapper(job, BMap.class, Text.class, Text.class,
* LongWritable.class, Text.class, false, mapBConf);
*
* ...
*
* job.waitForComplettion(true);
* ...
* </pre>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ChainMapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> extends
Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
/**
* Adds a {@link Mapper} class to the chain mapper.
*
* <p>
* The key and values are passed from one element of the chain to the next, by
* value. For the added Mapper the configuration given for it,
* <code>mapperConf</code>, have precedence over the job's Configuration. This
* precedence is in effect when the task is running.
* </p>
* <p>
* IMPORTANT: There is no need to specify the output key/value classes for the
* ChainMapper, this is done by the addMapper for the last mapper in the chain
* </p>
*
* @param job
* The job.
* @param klass
* the Mapper class to add.
* @param inputKeyClass
* mapper input key class.
* @param inputValueClass
* mapper input value class.
* @param outputKeyClass
* mapper output key class.
* @param outputValueClass
* mapper output value class.
* @param mapperConf
* a configuration for the Mapper class. It is recommended to use a
* Configuration without default values using the
* <code>Configuration(boolean loadDefaults)</code> constructor with
* FALSE.
*/
public static void addMapper(Job job, Class<? extends Mapper> klass,
Class<?> inputKeyClass, Class<?> inputValueClass,
Class<?> outputKeyClass, Class<?> outputValueClass,
Configuration mapperConf) throws IOException {
job.setMapperClass(ChainMapper.class);
job.setMapOutputKeyClass(outputKeyClass);
job.setMapOutputValueClass(outputValueClass);
Chain.addMapper(true, job, klass, inputKeyClass, inputValueClass,
outputKeyClass, outputValueClass, mapperConf);
}
private Chain chain;
protected void setup(Context context) {
chain = new Chain(true);
chain.setup(context.getConfiguration());
}
public void run(Context context) throws IOException, InterruptedException {
setup(context);
int numMappers = chain.getAllMappers().size();
if (numMappers == 0) {
return;
}
ChainBlockingQueue<Chain.KeyValuePair<?, ?>> inputqueue;
ChainBlockingQueue<Chain.KeyValuePair<?, ?>> outputqueue;
if (numMappers == 1) {
chain.runMapper(context, 0);
} else {
// add all the mappers with proper context
// add first mapper
outputqueue = chain.createBlockingQueue();
chain.addMapper(context, outputqueue, 0);
// add other mappers
for (int i = 1; i < numMappers - 1; i++) {
inputqueue = outputqueue;
outputqueue = chain.createBlockingQueue();
chain.addMapper(inputqueue, outputqueue, context, i);
}
// add last mapper
chain.addMapper(outputqueue, context, numMappers - 1);
}
// start all threads
chain.startAllThreads();
// wait for all threads
chain.joinAllThreads();
}
}
| 6,103 | 34.488372 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainReduceContextImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.chain;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.ReduceContext;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.security.Credentials;
/**
* A simple wrapper class that delegates most of its functionality to the
* underlying context, but overrides the methods to do with record writer and
* configuration
*/
class ChainReduceContextImpl<KEYIN, VALUEIN, KEYOUT, VALUEOUT> implements
ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
private final ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> base;
private final RecordWriter<KEYOUT, VALUEOUT> rw;
private final Configuration conf;
public ChainReduceContextImpl(
ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> base,
RecordWriter<KEYOUT, VALUEOUT> output, Configuration conf) {
this.base = base;
this.rw = output;
this.conf = conf;
}
@Override
public Iterable<VALUEIN> getValues() throws IOException, InterruptedException {
return base.getValues();
}
@Override
public boolean nextKey() throws IOException, InterruptedException {
return base.nextKey();
}
@Override
public Counter getCounter(Enum<?> counterName) {
return base.getCounter(counterName);
}
@Override
public Counter getCounter(String groupName, String counterName) {
return base.getCounter(groupName, counterName);
}
@Override
public KEYIN getCurrentKey() throws IOException, InterruptedException {
return base.getCurrentKey();
}
@Override
public VALUEIN getCurrentValue() throws IOException, InterruptedException {
return base.getCurrentValue();
}
@Override
public OutputCommitter getOutputCommitter() {
return base.getOutputCommitter();
}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
return base.nextKeyValue();
}
@Override
public void write(KEYOUT key, VALUEOUT value) throws IOException,
InterruptedException {
rw.write(key, value);
}
@Override
public String getStatus() {
return base.getStatus();
}
@Override
public TaskAttemptID getTaskAttemptID() {
return base.getTaskAttemptID();
}
@Override
public void setStatus(String msg) {
base.setStatus(msg);
}
@Override
public Path[] getArchiveClassPaths() {
return base.getArchiveClassPaths();
}
@Override
public String[] getArchiveTimestamps() {
return base.getArchiveTimestamps();
}
@Override
public URI[] getCacheArchives() throws IOException {
return base.getCacheArchives();
}
@Override
public URI[] getCacheFiles() throws IOException {
return base.getCacheFiles();
}
@Override
public Class<? extends Reducer<?, ?, ?, ?>> getCombinerClass()
throws ClassNotFoundException {
return base.getCombinerClass();
}
@Override
public Configuration getConfiguration() {
return conf;
}
@Override
public Path[] getFileClassPaths() {
return base.getFileClassPaths();
}
@Override
public String[] getFileTimestamps() {
return base.getFileTimestamps();
}
@Override
public RawComparator<?> getCombinerKeyGroupingComparator() {
return base.getCombinerKeyGroupingComparator();
}
@Override
public RawComparator<?> getGroupingComparator() {
return base.getGroupingComparator();
}
@Override
public Class<? extends InputFormat<?, ?>> getInputFormatClass()
throws ClassNotFoundException {
return base.getInputFormatClass();
}
@Override
public String getJar() {
return base.getJar();
}
@Override
public JobID getJobID() {
return base.getJobID();
}
@Override
public String getJobName() {
return base.getJobName();
}
@Override
public boolean getJobSetupCleanupNeeded() {
return base.getJobSetupCleanupNeeded();
}
@Override
public boolean getTaskCleanupNeeded() {
return base.getTaskCleanupNeeded();
}
@Override
public Path[] getLocalCacheArchives() throws IOException {
return base.getLocalCacheArchives();
}
@Override
public Path[] getLocalCacheFiles() throws IOException {
return base.getLocalCacheFiles();
}
@Override
public Class<?> getMapOutputKeyClass() {
return base.getMapOutputKeyClass();
}
@Override
public Class<?> getMapOutputValueClass() {
return base.getMapOutputValueClass();
}
@Override
public Class<? extends Mapper<?, ?, ?, ?>> getMapperClass()
throws ClassNotFoundException {
return base.getMapperClass();
}
@Override
public int getMaxMapAttempts() {
return base.getMaxMapAttempts();
}
@Override
public int getMaxReduceAttempts() {
return base.getMaxMapAttempts();
}
@Override
public int getNumReduceTasks() {
return base.getNumReduceTasks();
}
@Override
public Class<? extends OutputFormat<?, ?>> getOutputFormatClass()
throws ClassNotFoundException {
return base.getOutputFormatClass();
}
@Override
public Class<?> getOutputKeyClass() {
return base.getOutputKeyClass();
}
@Override
public Class<?> getOutputValueClass() {
return base.getOutputValueClass();
}
@Override
public Class<? extends Partitioner<?, ?>> getPartitionerClass()
throws ClassNotFoundException {
return base.getPartitionerClass();
}
@Override
public boolean getProfileEnabled() {
return base.getProfileEnabled();
}
@Override
public String getProfileParams() {
return base.getProfileParams();
}
@Override
public IntegerRanges getProfileTaskRange(boolean isMap) {
return base.getProfileTaskRange(isMap);
}
@Override
public Class<? extends Reducer<?, ?, ?, ?>> getReducerClass()
throws ClassNotFoundException {
return base.getReducerClass();
}
@Override
public RawComparator<?> getSortComparator() {
return base.getSortComparator();
}
@Override
public boolean getSymlink() {
return base.getSymlink();
}
@Override
public String getUser() {
return base.getUser();
}
@Override
public Path getWorkingDirectory() throws IOException {
return base.getWorkingDirectory();
}
@Override
public void progress() {
base.progress();
}
@Override
public Credentials getCredentials() {
return base.getCredentials();
}
@Override
public float getProgress() {
return base.getProgress();
}
}
| 7,801 | 23.229814 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainMapContextImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.chain;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MapContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.apache.hadoop.security.Credentials;
/**
* A simple wrapper class that delegates most of its functionality to the
* underlying context, but overrides the methods to do with record readers ,
* record writers and configuration.
*/
class ChainMapContextImpl<KEYIN, VALUEIN, KEYOUT, VALUEOUT> implements
MapContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
private RecordReader<KEYIN, VALUEIN> reader;
private RecordWriter<KEYOUT, VALUEOUT> output;
private TaskInputOutputContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> base;
private Configuration conf;
ChainMapContextImpl(
TaskInputOutputContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> base,
RecordReader<KEYIN, VALUEIN> rr, RecordWriter<KEYOUT, VALUEOUT> rw,
Configuration conf) {
this.reader = rr;
this.output = rw;
this.base = base;
this.conf = conf;
}
@Override
public KEYIN getCurrentKey() throws IOException, InterruptedException {
return reader.getCurrentKey();
}
@Override
public VALUEIN getCurrentValue() throws IOException, InterruptedException {
return reader.getCurrentValue();
}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
return reader.nextKeyValue();
}
@Override
public InputSplit getInputSplit() {
if (base instanceof MapContext) {
MapContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> mc =
(MapContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT>) base;
return mc.getInputSplit();
} else {
return null;
}
}
@Override
public Counter getCounter(Enum<?> counterName) {
return base.getCounter(counterName);
}
@Override
public Counter getCounter(String groupName, String counterName) {
return base.getCounter(groupName, counterName);
}
@Override
public OutputCommitter getOutputCommitter() {
return base.getOutputCommitter();
}
@Override
public void write(KEYOUT key, VALUEOUT value) throws IOException,
InterruptedException {
output.write(key, value);
}
@Override
public String getStatus() {
return base.getStatus();
}
@Override
public TaskAttemptID getTaskAttemptID() {
return base.getTaskAttemptID();
}
@Override
public void setStatus(String msg) {
base.setStatus(msg);
}
@Override
public Path[] getArchiveClassPaths() {
return base.getArchiveClassPaths();
}
@Override
public String[] getArchiveTimestamps() {
return base.getArchiveTimestamps();
}
@Override
public URI[] getCacheArchives() throws IOException {
return base.getCacheArchives();
}
@Override
public URI[] getCacheFiles() throws IOException {
return base.getCacheFiles();
}
@Override
public Class<? extends Reducer<?, ?, ?, ?>> getCombinerClass()
throws ClassNotFoundException {
return base.getCombinerClass();
}
@Override
public Configuration getConfiguration() {
return conf;
}
@Override
public Path[] getFileClassPaths() {
return base.getFileClassPaths();
}
@Override
public String[] getFileTimestamps() {
return base.getFileTimestamps();
}
@Override
public RawComparator<?> getCombinerKeyGroupingComparator() {
return base.getCombinerKeyGroupingComparator();
}
@Override
public RawComparator<?> getGroupingComparator() {
return base.getGroupingComparator();
}
@Override
public Class<? extends InputFormat<?, ?>> getInputFormatClass()
throws ClassNotFoundException {
return base.getInputFormatClass();
}
@Override
public String getJar() {
return base.getJar();
}
@Override
public JobID getJobID() {
return base.getJobID();
}
@Override
public String getJobName() {
return base.getJobName();
}
@Override
public boolean getJobSetupCleanupNeeded() {
return base.getJobSetupCleanupNeeded();
}
@Override
public boolean getTaskCleanupNeeded() {
return base.getTaskCleanupNeeded();
}
@Override
public Path[] getLocalCacheArchives() throws IOException {
return base.getLocalCacheArchives();
}
@Override
public Path[] getLocalCacheFiles() throws IOException {
return base.getLocalCacheArchives();
}
@Override
public Class<?> getMapOutputKeyClass() {
return base.getMapOutputKeyClass();
}
@Override
public Class<?> getMapOutputValueClass() {
return base.getMapOutputValueClass();
}
@Override
public Class<? extends Mapper<?, ?, ?, ?>> getMapperClass()
throws ClassNotFoundException {
return base.getMapperClass();
}
@Override
public int getMaxMapAttempts() {
return base.getMaxMapAttempts();
}
@Override
public int getMaxReduceAttempts() {
return base.getMaxReduceAttempts();
}
@Override
public int getNumReduceTasks() {
return base.getNumReduceTasks();
}
@Override
public Class<? extends OutputFormat<?, ?>> getOutputFormatClass()
throws ClassNotFoundException {
return base.getOutputFormatClass();
}
@Override
public Class<?> getOutputKeyClass() {
return base.getMapOutputKeyClass();
}
@Override
public Class<?> getOutputValueClass() {
return base.getOutputValueClass();
}
@Override
public Class<? extends Partitioner<?, ?>> getPartitionerClass()
throws ClassNotFoundException {
return base.getPartitionerClass();
}
@Override
public boolean getProfileEnabled() {
return base.getProfileEnabled();
}
@Override
public String getProfileParams() {
return base.getProfileParams();
}
@Override
public IntegerRanges getProfileTaskRange(boolean isMap) {
return base.getProfileTaskRange(isMap);
}
@Override
public Class<? extends Reducer<?, ?, ?, ?>> getReducerClass()
throws ClassNotFoundException {
return base.getReducerClass();
}
@Override
public RawComparator<?> getSortComparator() {
return base.getSortComparator();
}
@Override
public boolean getSymlink() {
return base.getSymlink();
}
@Override
public String getUser() {
return base.getUser();
}
@Override
public Path getWorkingDirectory() throws IOException {
return base.getWorkingDirectory();
}
@Override
public void progress() {
base.progress();
}
@Override
public Credentials getCredentials() {
return base.getCredentials();
}
@Override
public float getProgress() {
return base.getProgress();
}
}
| 8,121 | 23.68693 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.partition;
import java.io.UnsupportedEncodingException;
import java.util.List;
import java.util.ArrayList;
import java.util.StringTokenizer;
import org.apache.hadoop.util.UTF8ByteArrayUtils;
/**
* This is used in {@link KeyFieldBasedComparator} &
* {@link KeyFieldBasedPartitioner}. Defines all the methods
* for parsing key specifications. The key specification is of the form:
* -k pos1[,pos2], where pos is of the form f[.c][opts], where f is the number
* of the field to use, and c is the number of the first character from the
* beginning of the field. Fields and character posns are numbered starting
* with 1; a character position of zero in pos2 indicates the field's last
* character. If '.c' is omitted from pos1, it defaults to 1 (the beginning
* of the field); if omitted from pos2, it defaults to 0 (the end of the
* field). opts are ordering options (supported options are 'nr').
*/
class KeyFieldHelper {
protected static class KeyDescription {
int beginFieldIdx = 1;
int beginChar = 1;
int endFieldIdx = 0;
int endChar = 0;
boolean numeric;
boolean reverse;
@Override
public String toString() {
return "-k"
+ beginFieldIdx + "." + beginChar + ","
+ endFieldIdx + "." + endChar
+ (numeric ? "n" : "") + (reverse ? "r" : "");
}
}
private List<KeyDescription> allKeySpecs = new ArrayList<KeyDescription>();
private byte[] keyFieldSeparator;
private boolean keySpecSeen = false;
public void setKeyFieldSeparator(String keyFieldSeparator) {
try {
this.keyFieldSeparator =
keyFieldSeparator.getBytes("UTF-8");
} catch (UnsupportedEncodingException e) {
throw new RuntimeException("The current system does not " +
"support UTF-8 encoding!", e);
}
}
/** Required for backcompatibility with num.key.fields.for.partition in
* {@link KeyFieldBasedPartitioner} */
public void setKeyFieldSpec(int start, int end) {
if (end >= start) {
KeyDescription k = new KeyDescription();
k.beginFieldIdx = start;
k.endFieldIdx = end;
keySpecSeen = true;
allKeySpecs.add(k);
}
}
public List<KeyDescription> keySpecs() {
return allKeySpecs;
}
public int[] getWordLengths(byte []b, int start, int end) {
//Given a string like "hello how are you", it returns an array
//like [4 5, 3, 3, 3], where the first element is the number of
//fields
if (!keySpecSeen) {
//if there were no key specs, then the whole key is one word
return new int[] {1};
}
int[] lengths = new int[10];
int currLenLengths = lengths.length;
int idx = 1;
int pos;
while ((pos = UTF8ByteArrayUtils.findBytes(b, start, end,
keyFieldSeparator)) != -1) {
if (++idx == currLenLengths) {
int[] temp = lengths;
lengths = new int[(currLenLengths = currLenLengths*2)];
System.arraycopy(temp, 0, lengths, 0, temp.length);
}
lengths[idx - 1] = pos - start;
start = pos + 1;
}
if (start != end) {
lengths[idx] = end - start;
}
lengths[0] = idx; //number of words is the first element
return lengths;
}
public int getStartOffset(byte[]b, int start, int end,
int []lengthIndices, KeyDescription k) {
//if -k2.5,2 is the keyspec, the startChar is lengthIndices[1] + 5
//note that the [0]'th element is the number of fields in the key
if (lengthIndices[0] >= k.beginFieldIdx) {
int position = 0;
for (int i = 1; i < k.beginFieldIdx; i++) {
position += lengthIndices[i] + keyFieldSeparator.length;
}
if (position + k.beginChar <= (end - start)) {
return start + position + k.beginChar - 1;
}
}
return -1;
}
public int getEndOffset(byte[]b, int start, int end,
int []lengthIndices, KeyDescription k) {
//if -k2,2.8 is the keyspec, the endChar is lengthIndices[1] + 8
//note that the [0]'th element is the number of fields in the key
if (k.endFieldIdx == 0) {
//there is no end field specified for this keyspec. So the remaining
//part of the key is considered in its entirety.
return end - 1;
}
if (lengthIndices[0] >= k.endFieldIdx) {
int position = 0;
int i;
for (i = 1; i < k.endFieldIdx; i++) {
position += lengthIndices[i] + keyFieldSeparator.length;
}
if (k.endChar == 0) {
position += lengthIndices[i];
}
if (position + k.endChar <= (end - start)) {
return start + position + k.endChar - 1;
}
return end - 1;
}
return end - 1;
}
public void parseOption(String option) {
if (option == null || option.equals("")) {
//we will have only default comparison
return;
}
StringTokenizer args = new StringTokenizer(option);
KeyDescription global = new KeyDescription();
while (args.hasMoreTokens()) {
String arg = args.nextToken();
if (arg.equals("-n")) {
global.numeric = true;
}
if (arg.equals("-r")) {
global.reverse = true;
}
if (arg.equals("-nr")) {
global.numeric = true;
global.reverse = true;
}
if (arg.startsWith("-k")) {
KeyDescription k = parseKey(arg, args);
if (k != null) {
allKeySpecs.add(k);
keySpecSeen = true;
}
}
}
for (KeyDescription key : allKeySpecs) {
if (!(key.reverse | key.numeric)) {
key.reverse = global.reverse;
key.numeric = global.numeric;
}
}
if (allKeySpecs.size() == 0) {
allKeySpecs.add(global);
}
}
private KeyDescription parseKey(String arg, StringTokenizer args) {
//we allow for -k<arg> and -k <arg>
String keyArgs = null;
if (arg.length() == 2) {
if (args.hasMoreTokens()) {
keyArgs = args.nextToken();
}
} else {
keyArgs = arg.substring(2);
}
if (keyArgs == null || keyArgs.length() == 0) {
return null;
}
StringTokenizer st = new StringTokenizer(keyArgs,"nr.,",true);
KeyDescription key = new KeyDescription();
String token;
//the key is of the form 1[.3][nr][,1.5][nr]
if (st.hasMoreTokens()) {
token = st.nextToken();
//the first token must be a number
key.beginFieldIdx = Integer.parseInt(token);
}
if (st.hasMoreTokens()) {
token = st.nextToken();
if (token.equals(".")) {
token = st.nextToken();
key.beginChar = Integer.parseInt(token);
if (st.hasMoreTokens()) {
token = st.nextToken();
} else {
return key;
}
}
do {
if (token.equals("n")) {
key.numeric = true;
}
else if (token.equals("r")) {
key.reverse = true;
}
else break;
if (st.hasMoreTokens()) {
token = st.nextToken();
} else {
return key;
}
} while (true);
if (token.equals(",")) {
token = st.nextToken();
//the first token must be a number
key.endFieldIdx = Integer.parseInt(token);
if (st.hasMoreTokens()) {
token = st.nextToken();
if (token.equals(".")) {
token = st.nextToken();
key.endChar = Integer.parseInt(token);
if (st.hasMoreTokens()) {
token = st.nextToken();
} else {
return key;
}
}
do {
if (token.equals("n")) {
key.numeric = true;
}
else if (token.equals("r")) {
key.reverse = true;
}
else {
throw new IllegalArgumentException("Invalid -k argument. " +
"Must be of the form -k pos1,[pos2], where pos is of the form " +
"f[.c]nr");
}
if (st.hasMoreTokens()) {
token = st.nextToken();
} else {
break;
}
} while (true);
}
return key;
}
throw new IllegalArgumentException("Invalid -k argument. " +
"Must be of the form -k pos1,[pos2], where pos is of the form " +
"f[.c]nr");
}
return key;
}
private void printKey(KeyDescription key) {
System.out.println("key.beginFieldIdx: " + key.beginFieldIdx);
System.out.println("key.beginChar: " + key.beginChar);
System.out.println("key.endFieldIdx: " + key.endFieldIdx);
System.out.println("key.endChar: " + key.endChar);
System.out.println("key.numeric: " + key.numeric);
System.out.println("key.reverse: " + key.reverse);
System.out.println("parseKey over");
}
}
| 9,627 | 31.417508 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.partition;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* Utility for collecting samples and writing a partition file for
* {@link TotalOrderPartitioner}.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class InputSampler<K,V> extends Configured implements Tool {
private static final Log LOG = LogFactory.getLog(InputSampler.class);
static int printUsage() {
System.out.println("sampler -r <reduces>\n" +
" [-inFormat <input format class>]\n" +
" [-keyClass <map input & output key class>]\n" +
" [-splitRandom <double pcnt> <numSamples> <maxsplits> | " +
" // Sample from random splits at random (general)\n" +
" -splitSample <numSamples> <maxsplits> | " +
" // Sample from first records in splits (random data)\n"+
" -splitInterval <double pcnt> <maxsplits>]" +
" // Sample from splits at intervals (sorted data)");
System.out.println("Default sampler: -splitRandom 0.1 10000 10");
ToolRunner.printGenericCommandUsage(System.out);
return -1;
}
public InputSampler(Configuration conf) {
setConf(conf);
}
/**
* Interface to sample using an
* {@link org.apache.hadoop.mapreduce.InputFormat}.
*/
public interface Sampler<K,V> {
/**
* For a given job, collect and return a subset of the keys from the
* input data.
*/
K[] getSample(InputFormat<K,V> inf, Job job)
throws IOException, InterruptedException;
}
/**
* Samples the first n records from s splits.
* Inexpensive way to sample random data.
*/
public static class SplitSampler<K,V> implements Sampler<K,V> {
protected final int numSamples;
protected final int maxSplitsSampled;
/**
* Create a SplitSampler sampling <em>all</em> splits.
* Takes the first numSamples / numSplits records from each split.
* @param numSamples Total number of samples to obtain from all selected
* splits.
*/
public SplitSampler(int numSamples) {
this(numSamples, Integer.MAX_VALUE);
}
/**
* Create a new SplitSampler.
* @param numSamples Total number of samples to obtain from all selected
* splits.
* @param maxSplitsSampled The maximum number of splits to examine.
*/
public SplitSampler(int numSamples, int maxSplitsSampled) {
this.numSamples = numSamples;
this.maxSplitsSampled = maxSplitsSampled;
}
/**
* From each split sampled, take the first numSamples / numSplits records.
*/
@SuppressWarnings("unchecked") // ArrayList::toArray doesn't preserve type
public K[] getSample(InputFormat<K,V> inf, Job job)
throws IOException, InterruptedException {
List<InputSplit> splits = inf.getSplits(job);
ArrayList<K> samples = new ArrayList<K>(numSamples);
int splitsToSample = Math.min(maxSplitsSampled, splits.size());
int samplesPerSplit = numSamples / splitsToSample;
long records = 0;
for (int i = 0; i < splitsToSample; ++i) {
TaskAttemptContext samplingContext = new TaskAttemptContextImpl(
job.getConfiguration(), new TaskAttemptID());
RecordReader<K,V> reader = inf.createRecordReader(
splits.get(i), samplingContext);
reader.initialize(splits.get(i), samplingContext);
while (reader.nextKeyValue()) {
samples.add(ReflectionUtils.copy(job.getConfiguration(),
reader.getCurrentKey(), null));
++records;
if ((i+1) * samplesPerSplit <= records) {
break;
}
}
reader.close();
}
return (K[])samples.toArray();
}
}
/**
* Sample from random points in the input.
* General-purpose sampler. Takes numSamples / maxSplitsSampled inputs from
* each split.
*/
public static class RandomSampler<K,V> implements Sampler<K,V> {
protected double freq;
protected final int numSamples;
protected final int maxSplitsSampled;
/**
* Create a new RandomSampler sampling <em>all</em> splits.
* This will read every split at the client, which is very expensive.
* @param freq Probability with which a key will be chosen.
* @param numSamples Total number of samples to obtain from all selected
* splits.
*/
public RandomSampler(double freq, int numSamples) {
this(freq, numSamples, Integer.MAX_VALUE);
}
/**
* Create a new RandomSampler.
* @param freq Probability with which a key will be chosen.
* @param numSamples Total number of samples to obtain from all selected
* splits.
* @param maxSplitsSampled The maximum number of splits to examine.
*/
public RandomSampler(double freq, int numSamples, int maxSplitsSampled) {
this.freq = freq;
this.numSamples = numSamples;
this.maxSplitsSampled = maxSplitsSampled;
}
/**
* Randomize the split order, then take the specified number of keys from
* each split sampled, where each key is selected with the specified
* probability and possibly replaced by a subsequently selected key when
* the quota of keys from that split is satisfied.
*/
@SuppressWarnings("unchecked") // ArrayList::toArray doesn't preserve type
public K[] getSample(InputFormat<K,V> inf, Job job)
throws IOException, InterruptedException {
List<InputSplit> splits = inf.getSplits(job);
ArrayList<K> samples = new ArrayList<K>(numSamples);
int splitsToSample = Math.min(maxSplitsSampled, splits.size());
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
LOG.debug("seed: " + seed);
// shuffle splits
for (int i = 0; i < splits.size(); ++i) {
InputSplit tmp = splits.get(i);
int j = r.nextInt(splits.size());
splits.set(i, splits.get(j));
splits.set(j, tmp);
}
// our target rate is in terms of the maximum number of sample splits,
// but we accept the possibility of sampling additional splits to hit
// the target sample keyset
for (int i = 0; i < splitsToSample ||
(i < splits.size() && samples.size() < numSamples); ++i) {
TaskAttemptContext samplingContext = new TaskAttemptContextImpl(
job.getConfiguration(), new TaskAttemptID());
RecordReader<K,V> reader = inf.createRecordReader(
splits.get(i), samplingContext);
reader.initialize(splits.get(i), samplingContext);
while (reader.nextKeyValue()) {
if (r.nextDouble() <= freq) {
if (samples.size() < numSamples) {
samples.add(ReflectionUtils.copy(job.getConfiguration(),
reader.getCurrentKey(), null));
} else {
// When exceeding the maximum number of samples, replace a
// random element with this one, then adjust the frequency
// to reflect the possibility of existing elements being
// pushed out
int ind = r.nextInt(numSamples);
samples.set(ind, ReflectionUtils.copy(job.getConfiguration(),
reader.getCurrentKey(), null));
freq *= (numSamples - 1) / (double) numSamples;
}
}
}
reader.close();
}
return (K[])samples.toArray();
}
}
/**
* Sample from s splits at regular intervals.
* Useful for sorted data.
*/
public static class IntervalSampler<K,V> implements Sampler<K,V> {
protected final double freq;
protected final int maxSplitsSampled;
/**
* Create a new IntervalSampler sampling <em>all</em> splits.
* @param freq The frequency with which records will be emitted.
*/
public IntervalSampler(double freq) {
this(freq, Integer.MAX_VALUE);
}
/**
* Create a new IntervalSampler.
* @param freq The frequency with which records will be emitted.
* @param maxSplitsSampled The maximum number of splits to examine.
* @see #getSample
*/
public IntervalSampler(double freq, int maxSplitsSampled) {
this.freq = freq;
this.maxSplitsSampled = maxSplitsSampled;
}
/**
* For each split sampled, emit when the ratio of the number of records
* retained to the total record count is less than the specified
* frequency.
*/
@SuppressWarnings("unchecked") // ArrayList::toArray doesn't preserve type
public K[] getSample(InputFormat<K,V> inf, Job job)
throws IOException, InterruptedException {
List<InputSplit> splits = inf.getSplits(job);
ArrayList<K> samples = new ArrayList<K>();
int splitsToSample = Math.min(maxSplitsSampled, splits.size());
long records = 0;
long kept = 0;
for (int i = 0; i < splitsToSample; ++i) {
TaskAttemptContext samplingContext = new TaskAttemptContextImpl(
job.getConfiguration(), new TaskAttemptID());
RecordReader<K,V> reader = inf.createRecordReader(
splits.get(i), samplingContext);
reader.initialize(splits.get(i), samplingContext);
while (reader.nextKeyValue()) {
++records;
if ((double) kept / records < freq) {
samples.add(ReflectionUtils.copy(job.getConfiguration(),
reader.getCurrentKey(), null));
++kept;
}
}
reader.close();
}
return (K[])samples.toArray();
}
}
/**
* Write a partition file for the given job, using the Sampler provided.
* Queries the sampler for a sample keyset, sorts by the output key
* comparator, selects the keys for each rank, and writes to the destination
* returned from {@link TotalOrderPartitioner#getPartitionFile}.
*/
@SuppressWarnings("unchecked") // getInputFormat, getOutputKeyComparator
public static <K,V> void writePartitionFile(Job job, Sampler<K,V> sampler)
throws IOException, ClassNotFoundException, InterruptedException {
Configuration conf = job.getConfiguration();
final InputFormat inf =
ReflectionUtils.newInstance(job.getInputFormatClass(), conf);
int numPartitions = job.getNumReduceTasks();
K[] samples = (K[])sampler.getSample(inf, job);
LOG.info("Using " + samples.length + " samples");
RawComparator<K> comparator =
(RawComparator<K>) job.getSortComparator();
Arrays.sort(samples, comparator);
Path dst = new Path(TotalOrderPartitioner.getPartitionFile(conf));
FileSystem fs = dst.getFileSystem(conf);
if (fs.exists(dst)) {
fs.delete(dst, false);
}
SequenceFile.Writer writer = SequenceFile.createWriter(fs,
conf, dst, job.getMapOutputKeyClass(), NullWritable.class);
NullWritable nullValue = NullWritable.get();
float stepSize = samples.length / (float) numPartitions;
int last = -1;
for(int i = 1; i < numPartitions; ++i) {
int k = Math.round(stepSize * i);
while (last >= k && comparator.compare(samples[last], samples[k]) == 0) {
++k;
}
writer.append(samples[k], nullValue);
last = k;
}
writer.close();
}
/**
* Driver for InputSampler from the command line.
* Configures a JobConf instance and calls {@link #writePartitionFile}.
*/
public int run(String[] args) throws Exception {
Job job = Job.getInstance(getConf());
ArrayList<String> otherArgs = new ArrayList<String>();
Sampler<K,V> sampler = null;
for(int i=0; i < args.length; ++i) {
try {
if ("-r".equals(args[i])) {
job.setNumReduceTasks(Integer.parseInt(args[++i]));
} else if ("-inFormat".equals(args[i])) {
job.setInputFormatClass(
Class.forName(args[++i]).asSubclass(InputFormat.class));
} else if ("-keyClass".equals(args[i])) {
job.setMapOutputKeyClass(
Class.forName(args[++i]).asSubclass(WritableComparable.class));
} else if ("-splitSample".equals(args[i])) {
int numSamples = Integer.parseInt(args[++i]);
int maxSplits = Integer.parseInt(args[++i]);
if (0 >= maxSplits) maxSplits = Integer.MAX_VALUE;
sampler = new SplitSampler<K,V>(numSamples, maxSplits);
} else if ("-splitRandom".equals(args[i])) {
double pcnt = Double.parseDouble(args[++i]);
int numSamples = Integer.parseInt(args[++i]);
int maxSplits = Integer.parseInt(args[++i]);
if (0 >= maxSplits) maxSplits = Integer.MAX_VALUE;
sampler = new RandomSampler<K,V>(pcnt, numSamples, maxSplits);
} else if ("-splitInterval".equals(args[i])) {
double pcnt = Double.parseDouble(args[++i]);
int maxSplits = Integer.parseInt(args[++i]);
if (0 >= maxSplits) maxSplits = Integer.MAX_VALUE;
sampler = new IntervalSampler<K,V>(pcnt, maxSplits);
} else {
otherArgs.add(args[i]);
}
} catch (NumberFormatException except) {
System.out.println("ERROR: Integer expected instead of " + args[i]);
return printUsage();
} catch (ArrayIndexOutOfBoundsException except) {
System.out.println("ERROR: Required parameter missing from " +
args[i-1]);
return printUsage();
}
}
if (job.getNumReduceTasks() <= 1) {
System.err.println("Sampler requires more than one reducer");
return printUsage();
}
if (otherArgs.size() < 2) {
System.out.println("ERROR: Wrong number of parameters: ");
return printUsage();
}
if (null == sampler) {
sampler = new RandomSampler<K,V>(0.1, 10000, 10);
}
Path outf = new Path(otherArgs.remove(otherArgs.size() - 1));
TotalOrderPartitioner.setPartitionFile(getConf(), outf);
for (String s : otherArgs) {
FileInputFormat.addInputPath(job, new Path(s));
}
InputSampler.<K,V>writePartitionFile(job, sampler);
return 0;
}
public static void main(String[] args) throws Exception {
InputSampler<?,?> sampler = new InputSampler(new Configuration());
int res = ToolRunner.run(sampler, args);
System.exit(res);
}
}
| 16,348 | 38.11244 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/BinaryPartitioner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.partition;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.BinaryComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.Partitioner;
/**
* <p>Partition {@link BinaryComparable} keys using a configurable part of
* the bytes array returned by {@link BinaryComparable#getBytes()}.</p>
*
* <p>The subarray to be used for the partitioning can be defined by means
* of the following properties:
* <ul>
* <li>
* <i>mapreduce.partition.binarypartitioner.left.offset</i>:
* left offset in array (0 by default)
* </li>
* <li>
* <i>mapreduce.partition.binarypartitioner.right.offset</i>:
* right offset in array (-1 by default)
* </li>
* </ul>
* Like in Python, both negative and positive offsets are allowed, but
* the meaning is slightly different. In case of an array of length 5,
* for instance, the possible offsets are:
* <pre><code>
* +---+---+---+---+---+
* | B | B | B | B | B |
* +---+---+---+---+---+
* 0 1 2 3 4
* -5 -4 -3 -2 -1
* </code></pre>
* The first row of numbers gives the position of the offsets 0...5 in
* the array; the second row gives the corresponding negative offsets.
* Contrary to Python, the specified subarray has byte <code>i</code>
* and <code>j</code> as first and last element, repectively, when
* <code>i</code> and <code>j</code> are the left and right offset.
*
* <p>For Hadoop programs written in Java, it is advisable to use one of
* the following static convenience methods for setting the offsets:
* <ul>
* <li>{@link #setOffsets}</li>
* <li>{@link #setLeftOffset}</li>
* <li>{@link #setRightOffset}</li>
* </ul>
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class BinaryPartitioner<V> extends Partitioner<BinaryComparable, V>
implements Configurable {
public static final String LEFT_OFFSET_PROPERTY_NAME =
"mapreduce.partition.binarypartitioner.left.offset";
public static final String RIGHT_OFFSET_PROPERTY_NAME =
"mapreduce.partition.binarypartitioner.right.offset";
/**
* Set the subarray to be used for partitioning to
* <code>bytes[left:(right+1)]</code> in Python syntax.
*
* @param conf configuration object
* @param left left Python-style offset
* @param right right Python-style offset
*/
public static void setOffsets(Configuration conf, int left, int right) {
conf.setInt(LEFT_OFFSET_PROPERTY_NAME, left);
conf.setInt(RIGHT_OFFSET_PROPERTY_NAME, right);
}
/**
* Set the subarray to be used for partitioning to
* <code>bytes[offset:]</code> in Python syntax.
*
* @param conf configuration object
* @param offset left Python-style offset
*/
public static void setLeftOffset(Configuration conf, int offset) {
conf.setInt(LEFT_OFFSET_PROPERTY_NAME, offset);
}
/**
* Set the subarray to be used for partitioning to
* <code>bytes[:(offset+1)]</code> in Python syntax.
*
* @param conf configuration object
* @param offset right Python-style offset
*/
public static void setRightOffset(Configuration conf, int offset) {
conf.setInt(RIGHT_OFFSET_PROPERTY_NAME, offset);
}
private Configuration conf;
private int leftOffset, rightOffset;
public void setConf(Configuration conf) {
this.conf = conf;
leftOffset = conf.getInt(LEFT_OFFSET_PROPERTY_NAME, 0);
rightOffset = conf.getInt(RIGHT_OFFSET_PROPERTY_NAME, -1);
}
public Configuration getConf() {
return conf;
}
/**
* Use (the specified slice of the array returned by)
* {@link BinaryComparable#getBytes()} to partition.
*/
@Override
public int getPartition(BinaryComparable key, V value, int numPartitions) {
int length = key.getLength();
int leftIndex = (leftOffset + length) % length;
int rightIndex = (rightOffset + length) % length;
int hash = WritableComparator.hashBytes(key.getBytes(),
leftIndex, rightIndex - leftIndex + 1);
return (hash & Integer.MAX_VALUE) % numPartitions;
}
}
| 5,087 | 34.58042 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.partition;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.lib.partition.KeyFieldHelper.KeyDescription;
/**
* This comparator implementation provides a subset of the features provided
* by the Unix/GNU Sort. In particular, the supported features are:
* -n, (Sort numerically)
* -r, (Reverse the result of comparison)
* -k pos1[,pos2], where pos is of the form f[.c][opts], where f is the number
* of the field to use, and c is the number of the first character from the
* beginning of the field. Fields and character posns are numbered starting
* with 1; a character position of zero in pos2 indicates the field's last
* character. If '.c' is omitted from pos1, it defaults to 1 (the beginning
* of the field); if omitted from pos2, it defaults to 0 (the end of the
* field). opts are ordering options (any of 'nr' as described above).
* We assume that the fields in the key are separated by
* {@link JobContext#MAP_OUTPUT_KEY_FIELD_SEPERATOR}.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class KeyFieldBasedComparator<K, V> extends WritableComparator
implements Configurable {
private KeyFieldHelper keyFieldHelper = new KeyFieldHelper();
public static String COMPARATOR_OPTIONS = "mapreduce.partition.keycomparator.options";
private static final byte NEGATIVE = (byte)'-';
private static final byte ZERO = (byte)'0';
private static final byte DECIMAL = (byte)'.';
private Configuration conf;
public void setConf(Configuration conf) {
this.conf = conf;
String option = conf.get(COMPARATOR_OPTIONS);
String keyFieldSeparator = conf.get(MRJobConfig.MAP_OUTPUT_KEY_FIELD_SEPERATOR,"\t");
keyFieldHelper.setKeyFieldSeparator(keyFieldSeparator);
keyFieldHelper.parseOption(option);
}
public Configuration getConf() {
return conf;
}
public KeyFieldBasedComparator() {
super(Text.class);
}
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
int n1 = WritableUtils.decodeVIntSize(b1[s1]);
int n2 = WritableUtils.decodeVIntSize(b2[s2]);
List <KeyDescription> allKeySpecs = keyFieldHelper.keySpecs();
if (allKeySpecs.size() == 0) {
return compareBytes(b1, s1 + n1, l1 - n1, b2, s2 + n2, l2 - n2);
}
int []lengthIndicesFirst =
keyFieldHelper.getWordLengths(b1, s1 + n1, s1 + l1);
int []lengthIndicesSecond =
keyFieldHelper.getWordLengths(b2, s2 + n2, s2 + l2);
for (KeyDescription keySpec : allKeySpecs) {
int startCharFirst = keyFieldHelper.getStartOffset(b1, s1 + n1, s1 + l1,
lengthIndicesFirst, keySpec);
int endCharFirst = keyFieldHelper.getEndOffset(b1, s1 + n1, s1 + l1,
lengthIndicesFirst, keySpec);
int startCharSecond = keyFieldHelper.getStartOffset(b2, s2 + n2, s2 + l2,
lengthIndicesSecond, keySpec);
int endCharSecond = keyFieldHelper.getEndOffset(b2, s2 + n2, s2 + l2,
lengthIndicesSecond, keySpec);
int result;
if ((result = compareByteSequence(b1, startCharFirst, endCharFirst, b2,
startCharSecond, endCharSecond, keySpec)) != 0) {
return result;
}
}
return 0;
}
private int compareByteSequence(byte[] first, int start1, int end1,
byte[] second, int start2, int end2, KeyDescription key) {
if (start1 == -1) {
if (key.reverse) {
return 1;
}
return -1;
}
if (start2 == -1) {
if (key.reverse) {
return -1;
}
return 1;
}
int compareResult = 0;
if (!key.numeric) {
compareResult = compareBytes(first, start1, end1-start1 + 1, second,
start2, end2 - start2 + 1);
}
if (key.numeric) {
compareResult = numericalCompare (first, start1, end1, second, start2,
end2);
}
if (key.reverse) {
return -compareResult;
}
return compareResult;
}
private int numericalCompare (byte[] a, int start1, int end1,
byte[] b, int start2, int end2) {
int i = start1;
int j = start2;
int mul = 1;
byte first_a = a[i];
byte first_b = b[j];
if (first_a == NEGATIVE) {
if (first_b != NEGATIVE) {
//check for cases like -0.0 and 0.0 (they should be declared equal)
return oneNegativeCompare(a, start1 + 1, end1, b, start2, end2);
}
i++;
}
if (first_b == NEGATIVE) {
if (first_a != NEGATIVE) {
//check for cases like 0.0 and -0.0 (they should be declared equal)
return -oneNegativeCompare(b, start2+1, end2, a, start1, end1);
}
j++;
}
if (first_b == NEGATIVE && first_a == NEGATIVE) {
mul = -1;
}
//skip over ZEROs
while (i <= end1) {
if (a[i] != ZERO) {
break;
}
i++;
}
while (j <= end2) {
if (b[j] != ZERO) {
break;
}
j++;
}
//skip over equal characters and stopping at the first nondigit char
//The nondigit character could be '.'
while (i <= end1 && j <= end2) {
if (!isdigit(a[i]) || a[i] != b[j]) {
break;
}
i++; j++;
}
if (i <= end1) {
first_a = a[i];
}
if (j <= end2) {
first_b = b[j];
}
//store the result of the difference. This could be final result if the
//number of digits in the mantissa is the same in both the numbers
int firstResult = first_a - first_b;
//check whether we hit a decimal in the earlier scan
if ((first_a == DECIMAL && (!isdigit(first_b) || j > end2)) ||
(first_b == DECIMAL && (!isdigit(first_a) || i > end1))) {
return ((mul < 0) ? -decimalCompare(a, i, end1, b, j, end2) :
decimalCompare(a, i, end1, b, j, end2));
}
//check the number of digits in the mantissa of the numbers
int numRemainDigits_a = 0;
int numRemainDigits_b = 0;
while (i <= end1) {
//if we encounter a non-digit treat the corresponding number as being
//smaller
if (isdigit(a[i++])) {
numRemainDigits_a++;
} else break;
}
while (j <= end2) {
//if we encounter a non-digit treat the corresponding number as being
//smaller
if (isdigit(b[j++])) {
numRemainDigits_b++;
} else break;
}
int ret = numRemainDigits_a - numRemainDigits_b;
if (ret == 0) {
return ((mul < 0) ? -firstResult : firstResult);
} else {
return ((mul < 0) ? -ret : ret);
}
}
private boolean isdigit(byte b) {
if ('0' <= b && b <= '9') {
return true;
}
return false;
}
private int decimalCompare(byte[] a, int i, int end1,
byte[] b, int j, int end2) {
if (i > end1) {
//if a[] has nothing remaining
return -decimalCompare1(b, ++j, end2);
}
if (j > end2) {
//if b[] has nothing remaining
return decimalCompare1(a, ++i, end1);
}
if (a[i] == DECIMAL && b[j] == DECIMAL) {
while (i <= end1 && j <= end2) {
if (a[i] != b[j]) {
if (isdigit(a[i]) && isdigit(b[j])) {
return a[i] - b[j];
}
if (isdigit(a[i])) {
return 1;
}
if (isdigit(b[j])) {
return -1;
}
return 0;
}
i++; j++;
}
if (i > end1 && j > end2) {
return 0;
}
if (i > end1) {
//check whether there is a non-ZERO digit after potentially
//a number of ZEROs (e.g., a=.4444, b=.444400004)
return -decimalCompare1(b, j, end2);
}
if (j > end2) {
//check whether there is a non-ZERO digit after potentially
//a number of ZEROs (e.g., b=.4444, a=.444400004)
return decimalCompare1(a, i, end1);
}
}
else if (a[i] == DECIMAL) {
return decimalCompare1(a, ++i, end1);
}
else if (b[j] == DECIMAL) {
return -decimalCompare1(b, ++j, end2);
}
return 0;
}
private int decimalCompare1(byte[] a, int i, int end) {
while (i <= end) {
if (a[i] == ZERO) {
i++;
continue;
}
if (isdigit(a[i])) {
return 1;
} else {
return 0;
}
}
return 0;
}
private int oneNegativeCompare(byte[] a, int start1, int end1,
byte[] b, int start2, int end2) {
//here a[] is negative and b[] is positive
//We have to ascertain whether the number contains any digits.
//If it does, then it is a smaller number for sure. If not,
//then we need to scan b[] to find out whether b[] has a digit
//If b[] does contain a digit, then b[] is certainly
//greater. If not, that is, both a[] and b[] don't contain
//digits then they should be considered equal.
if (!isZero(a, start1, end1)) {
return -1;
}
//reached here - this means that a[] is a ZERO
if (!isZero(b, start2, end2)) {
return -1;
}
//reached here - both numbers are basically ZEROs and hence
//they should compare equal
return 0;
}
private boolean isZero(byte a[], int start, int end) {
//check for zeros in the significand part as well as the decimal part
//note that we treat the non-digit characters as ZERO
int i = start;
//we check the significand for being a ZERO
while (i <= end) {
if (a[i] != ZERO) {
if (a[i] != DECIMAL && isdigit(a[i])) {
return false;
}
break;
}
i++;
}
if (i != (end+1) && a[i++] == DECIMAL) {
//we check the decimal part for being a ZERO
while (i <= end) {
if (a[i] != ZERO) {
if (isdigit(a[i])) {
return false;
}
break;
}
i++;
}
}
return true;
}
/**
* Set the {@link KeyFieldBasedComparator} options used to compare keys.
*
* @param keySpec the key specification of the form -k pos1[,pos2], where,
* pos is of the form f[.c][opts], where f is the number
* of the key field to use, and c is the number of the first character from
* the beginning of the field. Fields and character posns are numbered
* starting with 1; a character position of zero in pos2 indicates the
* field's last character. If '.c' is omitted from pos1, it defaults to 1
* (the beginning of the field); if omitted from pos2, it defaults to 0
* (the end of the field). opts are ordering options. The supported options
* are:
* -n, (Sort numerically)
* -r, (Reverse the result of comparison)
*/
public static void setKeyFieldComparatorOptions(Job job, String keySpec) {
job.getConfiguration().set(COMPARATOR_OPTIONS, keySpec);
}
/**
* Get the {@link KeyFieldBasedComparator} options
*/
public static String getKeyFieldComparatorOption(JobContext job) {
return job.getConfiguration().get(COMPARATOR_OPTIONS);
}
}
| 12,200 | 31.449468 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/TotalOrderPartitioner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.partition;
import java.io.IOException;
import java.lang.reflect.Array;
import java.util.ArrayList;
import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BinaryComparable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.util.ReflectionUtils;
/**
* Partitioner effecting a total order by reading split points from
* an externally generated source.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TotalOrderPartitioner<K extends WritableComparable<?>,V>
extends Partitioner<K,V> implements Configurable {
private Node partitions;
public static final String DEFAULT_PATH = "_partition.lst";
public static final String PARTITIONER_PATH =
"mapreduce.totalorderpartitioner.path";
public static final String MAX_TRIE_DEPTH =
"mapreduce.totalorderpartitioner.trie.maxdepth";
public static final String NATURAL_ORDER =
"mapreduce.totalorderpartitioner.naturalorder";
Configuration conf;
private static final Log LOG = LogFactory.getLog(TotalOrderPartitioner.class);
public TotalOrderPartitioner() { }
/**
* Read in the partition file and build indexing data structures.
* If the keytype is {@link org.apache.hadoop.io.BinaryComparable} and
* <tt>total.order.partitioner.natural.order</tt> is not false, a trie
* of the first <tt>total.order.partitioner.max.trie.depth</tt>(2) + 1 bytes
* will be built. Otherwise, keys will be located using a binary search of
* the partition keyset using the {@link org.apache.hadoop.io.RawComparator}
* defined for this job. The input file must be sorted with the same
* comparator and contain {@link Job#getNumReduceTasks()} - 1 keys.
*/
@SuppressWarnings("unchecked") // keytype from conf not static
public void setConf(Configuration conf) {
try {
this.conf = conf;
String parts = getPartitionFile(conf);
final Path partFile = new Path(parts);
final FileSystem fs = (DEFAULT_PATH.equals(parts))
? FileSystem.getLocal(conf) // assume in DistributedCache
: partFile.getFileSystem(conf);
Job job = Job.getInstance(conf);
Class<K> keyClass = (Class<K>)job.getMapOutputKeyClass();
K[] splitPoints = readPartitions(fs, partFile, keyClass, conf);
if (splitPoints.length != job.getNumReduceTasks() - 1) {
throw new IOException("Wrong number of partitions in keyset");
}
RawComparator<K> comparator =
(RawComparator<K>) job.getSortComparator();
for (int i = 0; i < splitPoints.length - 1; ++i) {
if (comparator.compare(splitPoints[i], splitPoints[i+1]) >= 0) {
throw new IOException("Split points are out of order");
}
}
boolean natOrder =
conf.getBoolean(NATURAL_ORDER, true);
if (natOrder && BinaryComparable.class.isAssignableFrom(keyClass)) {
partitions = buildTrie((BinaryComparable[])splitPoints, 0,
splitPoints.length, new byte[0],
// Now that blocks of identical splitless trie nodes are
// represented reentrantly, and we develop a leaf for any trie
// node with only one split point, the only reason for a depth
// limit is to refute stack overflow or bloat in the pathological
// case where the split points are long and mostly look like bytes
// iii...iixii...iii . Therefore, we make the default depth
// limit large but not huge.
conf.getInt(MAX_TRIE_DEPTH, 200));
} else {
partitions = new BinarySearchNode(splitPoints, comparator);
}
} catch (IOException e) {
throw new IllegalArgumentException("Can't read partitions file", e);
}
}
public Configuration getConf() {
return conf;
}
// by construction, we know if our keytype
@SuppressWarnings("unchecked") // is memcmp-able and uses the trie
public int getPartition(K key, V value, int numPartitions) {
return partitions.findPartition(key);
}
/**
* Set the path to the SequenceFile storing the sorted partition keyset.
* It must be the case that for <tt>R</tt> reduces, there are <tt>R-1</tt>
* keys in the SequenceFile.
*/
public static void setPartitionFile(Configuration conf, Path p) {
conf.set(PARTITIONER_PATH, p.toString());
}
/**
* Get the path to the SequenceFile storing the sorted partition keyset.
* @see #setPartitionFile(Configuration, Path)
*/
public static String getPartitionFile(Configuration conf) {
return conf.get(PARTITIONER_PATH, DEFAULT_PATH);
}
/**
* Interface to the partitioner to locate a key in the partition keyset.
*/
interface Node<T> {
/**
* Locate partition in keyset K, st [Ki..Ki+1) defines a partition,
* with implicit K0 = -inf, Kn = +inf, and |K| = #partitions - 1.
*/
int findPartition(T key);
}
/**
* Base class for trie nodes. If the keytype is memcomp-able, this builds
* tries of the first <tt>total.order.partitioner.max.trie.depth</tt>
* bytes.
*/
static abstract class TrieNode implements Node<BinaryComparable> {
private final int level;
TrieNode(int level) {
this.level = level;
}
int getLevel() {
return level;
}
}
/**
* For types that are not {@link org.apache.hadoop.io.BinaryComparable} or
* where disabled by <tt>total.order.partitioner.natural.order</tt>,
* search the partition keyset with a binary search.
*/
class BinarySearchNode implements Node<K> {
private final K[] splitPoints;
private final RawComparator<K> comparator;
BinarySearchNode(K[] splitPoints, RawComparator<K> comparator) {
this.splitPoints = splitPoints;
this.comparator = comparator;
}
public int findPartition(K key) {
final int pos = Arrays.binarySearch(splitPoints, key, comparator) + 1;
return (pos < 0) ? -pos : pos;
}
}
/**
* An inner trie node that contains 256 children based on the next
* character.
*/
class InnerTrieNode extends TrieNode {
private TrieNode[] child = new TrieNode[256];
InnerTrieNode(int level) {
super(level);
}
public int findPartition(BinaryComparable key) {
int level = getLevel();
if (key.getLength() <= level) {
return child[0].findPartition(key);
}
return child[0xFF & key.getBytes()[level]].findPartition(key);
}
}
/**
* @param level the tree depth at this node
* @param splitPoints the full split point vector, which holds
* the split point or points this leaf node
* should contain
* @param lower first INcluded element of splitPoints
* @param upper first EXcluded element of splitPoints
* @return a leaf node. They come in three kinds: no split points
* [and the findParttion returns a canned index], one split
* point [and we compare with a single comparand], or more
* than one [and we do a binary search]. The last case is
* rare.
*/
private TrieNode LeafTrieNodeFactory
(int level, BinaryComparable[] splitPoints, int lower, int upper) {
switch (upper - lower) {
case 0:
return new UnsplitTrieNode(level, lower);
case 1:
return new SinglySplitTrieNode(level, splitPoints, lower);
default:
return new LeafTrieNode(level, splitPoints, lower, upper);
}
}
/**
* A leaf trie node that scans for the key between lower..upper.
*
* We don't generate many of these now, since we usually continue trie-ing
* when more than one split point remains at this level. and we make different
* objects for nodes with 0 or 1 split point.
*/
private class LeafTrieNode extends TrieNode {
final int lower;
final int upper;
final BinaryComparable[] splitPoints;
LeafTrieNode(int level, BinaryComparable[] splitPoints, int lower, int upper) {
super(level);
this.lower = lower;
this.upper = upper;
this.splitPoints = splitPoints;
}
public int findPartition(BinaryComparable key) {
final int pos = Arrays.binarySearch(splitPoints, lower, upper, key) + 1;
return (pos < 0) ? -pos : pos;
}
}
private class UnsplitTrieNode extends TrieNode {
final int result;
UnsplitTrieNode(int level, int value) {
super(level);
this.result = value;
}
public int findPartition(BinaryComparable key) {
return result;
}
}
private class SinglySplitTrieNode extends TrieNode {
final int lower;
final BinaryComparable mySplitPoint;
SinglySplitTrieNode(int level, BinaryComparable[] splitPoints, int lower) {
super(level);
this.lower = lower;
this.mySplitPoint = splitPoints[lower];
}
public int findPartition(BinaryComparable key) {
return lower + (key.compareTo(mySplitPoint) < 0 ? 0 : 1);
}
}
/**
* Read the cut points from the given IFile.
* @param fs The file system
* @param p The path to read
* @param keyClass The map output key class
* @param job The job config
* @throws IOException
*/
// matching key types enforced by passing in
@SuppressWarnings("unchecked") // map output key class
private K[] readPartitions(FileSystem fs, Path p, Class<K> keyClass,
Configuration conf) throws IOException {
SequenceFile.Reader reader = new SequenceFile.Reader(fs, p, conf);
ArrayList<K> parts = new ArrayList<K>();
K key = ReflectionUtils.newInstance(keyClass, conf);
NullWritable value = NullWritable.get();
try {
while (reader.next(key, value)) {
parts.add(key);
key = ReflectionUtils.newInstance(keyClass, conf);
}
reader.close();
reader = null;
} finally {
IOUtils.cleanup(LOG, reader);
}
return parts.toArray((K[])Array.newInstance(keyClass, parts.size()));
}
/**
*
* This object contains a TrieNodeRef if there is such a thing that
* can be repeated. Two adjacent trie node slots that contain no
* split points can be filled with the same trie node, even if they
* are not on the same level. See buildTreeRec, below.
*
*/
private class CarriedTrieNodeRef
{
TrieNode content;
CarriedTrieNodeRef() {
content = null;
}
}
/**
* Given a sorted set of cut points, build a trie that will find the correct
* partition quickly.
* @param splits the list of cut points
* @param lower the lower bound of partitions 0..numPartitions-1
* @param upper the upper bound of partitions 0..numPartitions-1
* @param prefix the prefix that we have already checked against
* @param maxDepth the maximum depth we will build a trie for
* @return the trie node that will divide the splits correctly
*/
private TrieNode buildTrie(BinaryComparable[] splits, int lower,
int upper, byte[] prefix, int maxDepth) {
return buildTrieRec
(splits, lower, upper, prefix, maxDepth, new CarriedTrieNodeRef());
}
/**
* This is the core of buildTrie. The interface, and stub, above, just adds
* an empty CarriedTrieNodeRef.
*
* We build trie nodes in depth first order, which is also in key space
* order. Every leaf node is referenced as a slot in a parent internal
* node. If two adjacent slots [in the DFO] hold leaf nodes that have
* no split point, then they are not separated by a split point either,
* because there's no place in key space for that split point to exist.
*
* When that happens, the leaf nodes would be semantically identical, and
* we reuse the object. A single CarriedTrieNodeRef "ref" lives for the
* duration of the tree-walk. ref carries a potentially reusable, unsplit
* leaf node for such reuse until a leaf node with a split arises, which
* breaks the chain until we need to make a new unsplit leaf node.
*
* Note that this use of CarriedTrieNodeRef means that for internal nodes,
* for internal nodes if this code is modified in any way we still need
* to make or fill in the subnodes in key space order.
*/
private TrieNode buildTrieRec(BinaryComparable[] splits, int lower,
int upper, byte[] prefix, int maxDepth, CarriedTrieNodeRef ref) {
final int depth = prefix.length;
// We generate leaves for a single split point as well as for
// no split points.
if (depth >= maxDepth || lower >= upper - 1) {
// If we have two consecutive requests for an unsplit trie node, we
// can deliver the same one the second time.
if (lower == upper && ref.content != null) {
return ref.content;
}
TrieNode result = LeafTrieNodeFactory(depth, splits, lower, upper);
ref.content = lower == upper ? result : null;
return result;
}
InnerTrieNode result = new InnerTrieNode(depth);
byte[] trial = Arrays.copyOf(prefix, prefix.length + 1);
// append an extra byte on to the prefix
int currentBound = lower;
for(int ch = 0; ch < 0xFF; ++ch) {
trial[depth] = (byte) (ch + 1);
lower = currentBound;
while (currentBound < upper) {
if (splits[currentBound].compareTo(trial, 0, trial.length) >= 0) {
break;
}
currentBound += 1;
}
trial[depth] = (byte) ch;
result.child[0xFF & ch]
= buildTrieRec(splits, lower, currentBound, trial, maxDepth, ref);
}
// pick up the rest
trial[depth] = (byte)0xFF;
result.child[0xFF]
= buildTrieRec(splits, lower, currentBound, trial, maxDepth, ref);
return result;
}
}
| 15,330 | 36.211165 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.partition;
import java.io.UnsupportedEncodingException;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.lib.partition.KeyFieldHelper.KeyDescription;
/**
* Defines a way to partition keys based on certain key fields (also see
* {@link KeyFieldBasedComparator}.
* The key specification supported is of the form -k pos1[,pos2], where,
* pos is of the form f[.c][opts], where f is the number
* of the key field to use, and c is the number of the first character from
* the beginning of the field. Fields and character posns are numbered
* starting with 1; a character position of zero in pos2 indicates the
* field's last character. If '.c' is omitted from pos1, it defaults to 1
* (the beginning of the field); if omitted from pos2, it defaults to 0
* (the end of the field).
*
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class KeyFieldBasedPartitioner<K2, V2> extends Partitioner<K2, V2>
implements Configurable {
private static final Log LOG = LogFactory.getLog(
KeyFieldBasedPartitioner.class.getName());
public static String PARTITIONER_OPTIONS =
"mapreduce.partition.keypartitioner.options";
private int numOfPartitionFields;
private KeyFieldHelper keyFieldHelper = new KeyFieldHelper();
private Configuration conf;
public void setConf(Configuration conf) {
this.conf = conf;
keyFieldHelper = new KeyFieldHelper();
String keyFieldSeparator =
conf.get(MRJobConfig.MAP_OUTPUT_KEY_FIELD_SEPERATOR, "\t");
keyFieldHelper.setKeyFieldSeparator(keyFieldSeparator);
if (conf.get("num.key.fields.for.partition") != null) {
LOG.warn("Using deprecated num.key.fields.for.partition. " +
"Use mapreduce.partition.keypartitioner.options instead");
this.numOfPartitionFields = conf.getInt("num.key.fields.for.partition",0);
keyFieldHelper.setKeyFieldSpec(1,numOfPartitionFields);
} else {
String option = conf.get(PARTITIONER_OPTIONS);
keyFieldHelper.parseOption(option);
}
}
public Configuration getConf() {
return conf;
}
public int getPartition(K2 key, V2 value, int numReduceTasks) {
byte[] keyBytes;
List <KeyDescription> allKeySpecs = keyFieldHelper.keySpecs();
if (allKeySpecs.size() == 0) {
return getPartition(key.toString().hashCode(), numReduceTasks);
}
try {
keyBytes = key.toString().getBytes("UTF-8");
} catch (UnsupportedEncodingException e) {
throw new RuntimeException("The current system does not " +
"support UTF-8 encoding!", e);
}
// return 0 if the key is empty
if (keyBytes.length == 0) {
return 0;
}
int []lengthIndicesFirst = keyFieldHelper.getWordLengths(keyBytes, 0,
keyBytes.length);
int currentHash = 0;
for (KeyDescription keySpec : allKeySpecs) {
int startChar = keyFieldHelper.getStartOffset(keyBytes, 0,
keyBytes.length, lengthIndicesFirst, keySpec);
// no key found! continue
if (startChar < 0) {
continue;
}
int endChar = keyFieldHelper.getEndOffset(keyBytes, 0, keyBytes.length,
lengthIndicesFirst, keySpec);
currentHash = hashCode(keyBytes, startChar, endChar,
currentHash);
}
return getPartition(currentHash, numReduceTasks);
}
protected int hashCode(byte[] b, int start, int end, int currentHash) {
for (int i = start; i <= end; i++) {
currentHash = 31*currentHash + b[i];
}
return currentHash;
}
protected int getPartition(int hash, int numReduceTasks) {
return (hash & Integer.MAX_VALUE) % numReduceTasks;
}
/**
* Set the {@link KeyFieldBasedPartitioner} options used for
* {@link Partitioner}
*
* @param keySpec the key specification of the form -k pos1[,pos2], where,
* pos is of the form f[.c][opts], where f is the number
* of the key field to use, and c is the number of the first character from
* the beginning of the field. Fields and character posns are numbered
* starting with 1; a character position of zero in pos2 indicates the
* field's last character. If '.c' is omitted from pos1, it defaults to 1
* (the beginning of the field); if omitted from pos2, it defaults to 0
* (the end of the field).
*/
public void setKeyFieldPartitionerOptions(Job job, String keySpec) {
job.getConfiguration().set(PARTITIONER_OPTIONS, keySpec);
}
/**
* Get the {@link KeyFieldBasedPartitioner} options
*/
public String getKeyFieldPartitionerOption(JobContext job) {
return job.getConfiguration().get(PARTITIONER_OPTIONS);
}
}
| 6,004 | 36.767296 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/HashPartitioner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.partition;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.Partitioner;
/** Partition keys by their {@link Object#hashCode()}. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class HashPartitioner<K, V> extends Partitioner<K, V> {
/** Use {@link Object#hashCode()} to partition. */
public int getPartition(K key, V value,
int numReduceTasks) {
return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
}
}
| 1,419 | 37.378378 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.jobcontrol;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.jobcontrol.Job;
import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob.State;
import org.apache.hadoop.util.StringUtils;
/**
* This class encapsulates a set of MapReduce jobs and its dependency.
*
* It tracks the states of the jobs by placing them into different tables
* according to their states.
*
* This class provides APIs for the client app to add a job to the group
* and to get the jobs in the group in different states. When a job is
* added, an ID unique to the group is assigned to the job.
*
* This class has a thread that submits jobs when they become ready,
* monitors the states of the running jobs, and updates the states of jobs
* based on the state changes of their depending jobs states. The class
* provides APIs for suspending/resuming the thread, and
* for stopping the thread.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class JobControl implements Runnable {
private static final Log LOG = LogFactory.getLog(JobControl.class);
// The thread can be in one of the following state
public static enum ThreadState {RUNNING, SUSPENDED,STOPPED, STOPPING, READY};
private ThreadState runnerState; // the thread state
private LinkedList<ControlledJob> jobsInProgress = new LinkedList<ControlledJob>();
private LinkedList<ControlledJob> successfulJobs = new LinkedList<ControlledJob>();
private LinkedList<ControlledJob> failedJobs = new LinkedList<ControlledJob>();
private long nextJobID;
private String groupName;
/**
* Construct a job control for a group of jobs.
* @param groupName a name identifying this group
*/
public JobControl(String groupName) {
this.nextJobID = -1;
this.groupName = groupName;
this.runnerState = ThreadState.READY;
}
private static List<ControlledJob> toList(
LinkedList<ControlledJob> jobs) {
ArrayList<ControlledJob> retv = new ArrayList<ControlledJob>();
for (ControlledJob job : jobs) {
retv.add(job);
}
return retv;
}
synchronized private List<ControlledJob> getJobsIn(State state) {
LinkedList<ControlledJob> l = new LinkedList<ControlledJob>();
for(ControlledJob j: jobsInProgress) {
if(j.getJobState() == state) {
l.add(j);
}
}
return l;
}
/**
* @return the jobs in the waiting state
*/
public List<ControlledJob> getWaitingJobList() {
return getJobsIn(State.WAITING);
}
/**
* @return the jobs in the running state
*/
public List<ControlledJob> getRunningJobList() {
return getJobsIn(State.RUNNING);
}
/**
* @return the jobs in the ready state
*/
public List<ControlledJob> getReadyJobsList() {
return getJobsIn(State.READY);
}
/**
* @return the jobs in the success state
*/
synchronized public List<ControlledJob> getSuccessfulJobList() {
return toList(this.successfulJobs);
}
synchronized public List<ControlledJob> getFailedJobList() {
return toList(this.failedJobs);
}
private String getNextJobID() {
nextJobID += 1;
return this.groupName + this.nextJobID;
}
/**
* Add a new controlled job.
* @param aJob the new controlled job
*/
synchronized public String addJob(ControlledJob aJob) {
String id = this.getNextJobID();
aJob.setJobID(id);
aJob.setJobState(State.WAITING);
jobsInProgress.add(aJob);
return id;
}
/**
* Add a new job.
* @param aJob the new job
*/
synchronized public String addJob(Job aJob) {
return addJob((ControlledJob) aJob);
}
/**
* Add a collection of jobs
*
* @param jobs
*/
public void addJobCollection(Collection<ControlledJob> jobs) {
for (ControlledJob job : jobs) {
addJob(job);
}
}
/**
* @return the thread state
*/
public ThreadState getThreadState() {
return this.runnerState;
}
/**
* set the thread state to STOPPING so that the
* thread will stop when it wakes up.
*/
public void stop() {
this.runnerState = ThreadState.STOPPING;
}
/**
* suspend the running thread
*/
public void suspend () {
if (this.runnerState == ThreadState.RUNNING) {
this.runnerState = ThreadState.SUSPENDED;
}
}
/**
* resume the suspended thread
*/
public void resume () {
if (this.runnerState == ThreadState.SUSPENDED) {
this.runnerState = ThreadState.RUNNING;
}
}
synchronized public boolean allFinished() {
return jobsInProgress.isEmpty();
}
/**
* The main loop for the thread.
* The loop does the following:
* Check the states of the running jobs
* Update the states of waiting jobs
* Submit the jobs in ready state
*/
public void run() {
try {
this.runnerState = ThreadState.RUNNING;
while (true) {
while (this.runnerState == ThreadState.SUSPENDED) {
try {
Thread.sleep(5000);
}
catch (Exception e) {
//TODO the thread was interrupted, do something!!!
}
}
synchronized(this) {
Iterator<ControlledJob> it = jobsInProgress.iterator();
while(it.hasNext()) {
ControlledJob j = it.next();
LOG.debug("Checking state of job "+j);
switch(j.checkState()) {
case SUCCESS:
successfulJobs.add(j);
it.remove();
break;
case FAILED:
case DEPENDENT_FAILED:
failedJobs.add(j);
it.remove();
break;
case READY:
j.submit();
break;
case RUNNING:
case WAITING:
//Do Nothing
break;
}
}
}
if (this.runnerState != ThreadState.RUNNING &&
this.runnerState != ThreadState.SUSPENDED) {
break;
}
try {
Thread.sleep(5000);
}
catch (Exception e) {
//TODO the thread was interrupted, do something!!!
}
if (this.runnerState != ThreadState.RUNNING &&
this.runnerState != ThreadState.SUSPENDED) {
break;
}
}
}catch(Throwable t) {
LOG.error("Error while trying to run jobs.",t);
//Mark all jobs as failed because we got something bad.
failAllJobs(t);
}
this.runnerState = ThreadState.STOPPED;
}
synchronized private void failAllJobs(Throwable t) {
String message = "Unexpected System Error Occured: "+
StringUtils.stringifyException(t);
Iterator<ControlledJob> it = jobsInProgress.iterator();
while(it.hasNext()) {
ControlledJob j = it.next();
try {
j.failJob(message);
} catch (IOException e) {
LOG.error("Error while tyring to clean up "+j.getJobName(), e);
} catch (InterruptedException e) {
LOG.error("Error while tyring to clean up "+j.getJobName(), e);
} finally {
failedJobs.add(j);
it.remove();
}
}
}
}
| 8,340 | 27.565068 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.jobcontrol;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.util.StringUtils;
/**
* This class encapsulates a MapReduce job and its dependency. It monitors
* the states of the depending jobs and updates the state of this job.
* A job starts in the WAITING state. If it does not have any depending jobs,
* or all of the depending jobs are in SUCCESS state, then the job state
* will become READY. If any depending jobs fail, the job will fail too.
* When in READY state, the job can be submitted to Hadoop for execution, with
* the state changing into RUNNING state. From RUNNING state, the job
* can get into SUCCESS or FAILED state, depending
* the status of the job execution.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ControlledJob {
private static final Log LOG = LogFactory.getLog(ControlledJob.class);
// A job will be in one of the following states
public static enum State {SUCCESS, WAITING, RUNNING, READY, FAILED,
DEPENDENT_FAILED};
public static final String CREATE_DIR = "mapreduce.jobcontrol.createdir.ifnotexist";
private State state;
private String controlID; // assigned and used by JobControl class
private Job job; // mapreduce job to be executed.
// some info for human consumption, e.g. the reason why the job failed
private String message;
// the jobs the current job depends on
private List<ControlledJob> dependingJobs;
/**
* Construct a job.
* @param job a mapreduce job to be executed.
* @param dependingJobs an array of jobs the current job depends on
*/
public ControlledJob(Job job, List<ControlledJob> dependingJobs)
throws IOException {
this.job = job;
this.dependingJobs = dependingJobs;
this.state = State.WAITING;
this.controlID = "unassigned";
this.message = "just initialized";
}
/**
* Construct a job.
*
* @param conf mapred job configuration representing a job to be executed.
* @throws IOException
*/
public ControlledJob(Configuration conf) throws IOException {
this(Job.getInstance(conf), null);
}
@Override
public String toString() {
StringBuffer sb = new StringBuffer();
sb.append("job name:\t").append(this.job.getJobName()).append("\n");
sb.append("job id:\t").append(this.controlID).append("\n");
sb.append("job state:\t").append(this.state).append("\n");
sb.append("job mapred id:\t").append(this.job.getJobID()).append("\n");
sb.append("job message:\t").append(this.message).append("\n");
if (this.dependingJobs == null || this.dependingJobs.size() == 0) {
sb.append("job has no depending job:\t").append("\n");
} else {
sb.append("job has ").append(this.dependingJobs.size()).
append(" dependeng jobs:\n");
for (int i = 0; i < this.dependingJobs.size(); i++) {
sb.append("\t depending job ").append(i).append(":\t");
sb.append((this.dependingJobs.get(i)).getJobName()).append("\n");
}
}
return sb.toString();
}
/**
* @return the job name of this job
*/
public String getJobName() {
return job.getJobName();
}
/**
* Set the job name for this job.
* @param jobName the job name
*/
public void setJobName(String jobName) {
job.setJobName(jobName);
}
/**
* @return the job ID of this job assigned by JobControl
*/
public String getJobID() {
return this.controlID;
}
/**
* Set the job ID for this job.
* @param id the job ID
*/
public void setJobID(String id) {
this.controlID = id;
}
/**
* @return the mapred ID of this job as assigned by the mapred framework.
*/
public synchronized JobID getMapredJobId() {
return this.job.getJobID();
}
/**
* @return the mapreduce job
*/
public synchronized Job getJob() {
return this.job;
}
/**
* Set the mapreduce job
* @param job the mapreduce job for this job.
*/
public synchronized void setJob(Job job) {
this.job = job;
}
/**
* @return the state of this job
*/
public synchronized State getJobState() {
return this.state;
}
/**
* Set the state for this job.
* @param state the new state for this job.
*/
protected synchronized void setJobState(State state) {
this.state = state;
}
/**
* @return the message of this job
*/
public synchronized String getMessage() {
return this.message;
}
/**
* Set the message for this job.
* @param message the message for this job.
*/
public synchronized void setMessage(String message) {
this.message = message;
}
/**
* @return the depending jobs of this job
*/
public List<ControlledJob> getDependentJobs() {
return this.dependingJobs;
}
/**
* Add a job to this jobs' dependency list.
* Dependent jobs can only be added while a Job
* is waiting to run, not during or afterwards.
*
* @param dependingJob Job that this Job depends on.
* @return <tt>true</tt> if the Job was added.
*/
public synchronized boolean addDependingJob(ControlledJob dependingJob) {
if (this.state == State.WAITING) { //only allowed to add jobs when waiting
if (this.dependingJobs == null) {
this.dependingJobs = new ArrayList<ControlledJob>();
}
return this.dependingJobs.add(dependingJob);
} else {
return false;
}
}
/**
* @return true if this job is in a complete state
*/
public synchronized boolean isCompleted() {
return this.state == State.FAILED ||
this.state == State.DEPENDENT_FAILED ||
this.state == State.SUCCESS;
}
/**
* @return true if this job is in READY state
*/
public synchronized boolean isReady() {
return this.state == State.READY;
}
public void killJob() throws IOException, InterruptedException {
job.killJob();
}
public synchronized void failJob(String message) throws IOException, InterruptedException {
try {
if(job != null && this.state == State.RUNNING) {
job.killJob();
}
} finally {
this.state = State.FAILED;
this.message = message;
}
}
/**
* Check the state of this running job. The state may
* remain the same, become SUCCESS or FAILED.
*/
private void checkRunningState() throws IOException, InterruptedException {
try {
if (job.isComplete()) {
if (job.isSuccessful()) {
this.state = State.SUCCESS;
} else {
this.state = State.FAILED;
this.message = "Job failed!";
}
}
} catch (IOException ioe) {
this.state = State.FAILED;
this.message = StringUtils.stringifyException(ioe);
try {
if (job != null) {
job.killJob();
}
} catch (IOException e) {}
}
}
/**
* Check and update the state of this job. The state changes
* depending on its current state and the states of the depending jobs.
*/
synchronized State checkState() throws IOException, InterruptedException {
if (this.state == State.RUNNING) {
checkRunningState();
}
if (this.state != State.WAITING) {
return this.state;
}
if (this.dependingJobs == null || this.dependingJobs.size() == 0) {
this.state = State.READY;
return this.state;
}
ControlledJob pred = null;
int n = this.dependingJobs.size();
for (int i = 0; i < n; i++) {
pred = this.dependingJobs.get(i);
State s = pred.checkState();
if (s == State.WAITING || s == State.READY || s == State.RUNNING) {
break; // a pred is still not completed, continue in WAITING
// state
}
if (s == State.FAILED || s == State.DEPENDENT_FAILED) {
this.state = State.DEPENDENT_FAILED;
this.message = "depending job " + i + " with jobID "
+ pred.getJobID() + " failed. " + pred.getMessage();
break;
}
// pred must be in success state
if (i == n - 1) {
this.state = State.READY;
}
}
return this.state;
}
/**
* Submit this job to mapred. The state becomes RUNNING if submission
* is successful, FAILED otherwise.
*/
protected synchronized void submit() {
try {
Configuration conf = job.getConfiguration();
if (conf.getBoolean(CREATE_DIR, false)) {
FileSystem fs = FileSystem.get(conf);
Path inputPaths[] = FileInputFormat.getInputPaths(job);
for (int i = 0; i < inputPaths.length; i++) {
if (!fs.exists(inputPaths[i])) {
try {
fs.mkdirs(inputPaths[i]);
} catch (IOException e) {
}
}
}
}
job.submit();
this.state = State.RUNNING;
} catch (Exception ioe) {
LOG.info(getJobName()+" got an error while submitting ",ioe);
this.state = State.FAILED;
this.message = StringUtils.stringifyException(ioe);
}
}
}
| 10,349 | 29 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.PriorityQueue;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.util.ReflectionUtils;
/**
* A RecordReader that can effect joins of RecordReaders sharing a common key
* type and partitioning.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class CompositeRecordReader<
K extends WritableComparable<?>, // key type
V extends Writable, // accepts RecordReader<K,V> as children
X extends Writable> // emits Writables of this type
extends ComposableRecordReader<K, X>
implements Configurable {
private int id;
protected Configuration conf;
private final ResetableIterator<X> EMPTY = new ResetableIterator.EMPTY<X>();
private WritableComparator cmp;
@SuppressWarnings("unchecked")
protected Class<? extends WritableComparable> keyclass = null;
private PriorityQueue<ComposableRecordReader<K,?>> q;
protected final JoinCollector jc;
protected final ComposableRecordReader<K,? extends V>[] kids;
protected abstract boolean combine(Object[] srcs, TupleWritable value);
protected K key;
protected X value;
/**
* Create a RecordReader with <tt>capacity</tt> children to position
* <tt>id</tt> in the parent reader.
* The id of a root CompositeRecordReader is -1 by convention, but relying
* on this is not recommended.
*/
@SuppressWarnings("unchecked") // Generic array assignment
public CompositeRecordReader(int id, int capacity,
Class<? extends WritableComparator> cmpcl)
throws IOException {
assert capacity > 0 : "Invalid capacity";
this.id = id;
if (null != cmpcl) {
cmp = ReflectionUtils.newInstance(cmpcl, null);
q = new PriorityQueue<ComposableRecordReader<K,?>>(3,
new Comparator<ComposableRecordReader<K,?>>() {
public int compare(ComposableRecordReader<K,?> o1,
ComposableRecordReader<K,?> o2) {
return cmp.compare(o1.key(), o2.key());
}
});
}
jc = new JoinCollector(capacity);
kids = new ComposableRecordReader[capacity];
}
@SuppressWarnings("unchecked")
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
if (kids != null) {
for (int i = 0; i < kids.length; ++i) {
kids[i].initialize(((CompositeInputSplit)split).get(i), context);
if (kids[i].key() == null) {
continue;
}
// get keyclass
if (keyclass == null) {
keyclass = kids[i].createKey().getClass().
asSubclass(WritableComparable.class);
}
// create priority queue
if (null == q) {
cmp = WritableComparator.get(keyclass, conf);
q = new PriorityQueue<ComposableRecordReader<K,?>>(3,
new Comparator<ComposableRecordReader<K,?>>() {
public int compare(ComposableRecordReader<K,?> o1,
ComposableRecordReader<K,?> o2) {
return cmp.compare(o1.key(), o2.key());
}
});
}
// Explicit check for key class agreement
if (!keyclass.equals(kids[i].key().getClass())) {
throw new ClassCastException("Child key classes fail to agree");
}
// add the kid to priority queue if it has any elements
if (kids[i].hasNext()) {
q.add(kids[i]);
}
}
}
}
/**
* Return the position in the collector this class occupies.
*/
public int id() {
return id;
}
/**
* {@inheritDoc}
*/
public void setConf(Configuration conf) {
this.conf = conf;
}
/**
* {@inheritDoc}
*/
public Configuration getConf() {
return conf;
}
/**
* Return sorted list of RecordReaders for this composite.
*/
protected PriorityQueue<ComposableRecordReader<K,?>> getRecordReaderQueue() {
return q;
}
/**
* Return comparator defining the ordering for RecordReaders in this
* composite.
*/
protected WritableComparator getComparator() {
return cmp;
}
/**
* Add a RecordReader to this collection.
* The id() of a RecordReader determines where in the Tuple its
* entry will appear. Adding RecordReaders with the same id has
* undefined behavior.
*/
public void add(ComposableRecordReader<K,? extends V> rr)
throws IOException, InterruptedException {
kids[rr.id()] = rr;
}
/**
* Collector for join values.
* This accumulates values for a given key from the child RecordReaders. If
* one or more child RR contain duplicate keys, this will emit the cross
* product of the associated values until exhausted.
*/
public class JoinCollector {
private K key;
private ResetableIterator<X>[] iters;
private int pos = -1;
private boolean first = true;
/**
* Construct a collector capable of handling the specified number of
* children.
*/
@SuppressWarnings("unchecked") // Generic array assignment
public JoinCollector(int card) {
iters = new ResetableIterator[card];
for (int i = 0; i < iters.length; ++i) {
iters[i] = EMPTY;
}
}
/**
* Register a given iterator at position id.
*/
public void add(int id, ResetableIterator<X> i)
throws IOException {
iters[id] = i;
}
/**
* Return the key associated with this collection.
*/
public K key() {
return key;
}
/**
* Codify the contents of the collector to be iterated over.
* When this is called, all RecordReaders registered for this
* key should have added ResetableIterators.
*/
public void reset(K key) {
this.key = key;
first = true;
pos = iters.length - 1;
for (int i = 0; i < iters.length; ++i) {
iters[i].reset();
}
}
/**
* Clear all state information.
*/
public void clear() {
key = null;
pos = -1;
for (int i = 0; i < iters.length; ++i) {
iters[i].clear();
iters[i] = EMPTY;
}
}
/**
* Returns false if exhausted or if reset(K) has not been called.
*/
public boolean hasNext() {
return !(pos < 0);
}
/**
* Populate Tuple from iterators.
* It should be the case that, given iterators i_1...i_n over values from
* sources s_1...s_n sharing key k, repeated calls to next should yield
* I x I.
*/
@SuppressWarnings("unchecked") // No static type info on Tuples
protected boolean next(TupleWritable val) throws IOException {
if (first) {
int i = -1;
for (pos = 0; pos < iters.length; ++pos) {
if (iters[pos].hasNext() && iters[pos].next((X)val.get(pos))) {
i = pos;
val.setWritten(i);
}
}
pos = i;
first = false;
if (pos < 0) {
clear();
return false;
}
return true;
}
while (0 <= pos && !(iters[pos].hasNext() &&
iters[pos].next((X)val.get(pos)))) {
--pos;
}
if (pos < 0) {
clear();
return false;
}
val.setWritten(pos);
for (int i = 0; i < pos; ++i) {
if (iters[i].replay((X)val.get(i))) {
val.setWritten(i);
}
}
while (pos + 1 < iters.length) {
++pos;
iters[pos].reset();
if (iters[pos].hasNext() && iters[pos].next((X)val.get(pos))) {
val.setWritten(pos);
}
}
return true;
}
/**
* Replay the last Tuple emitted.
*/
@SuppressWarnings("unchecked") // No static typeinfo on Tuples
public boolean replay(TupleWritable val) throws IOException {
// The last emitted tuple might have drawn on an empty source;
// it can't be cleared prematurely, b/c there may be more duplicate
// keys in iterator positions < pos
assert !first;
boolean ret = false;
for (int i = 0; i < iters.length; ++i) {
if (iters[i].replay((X)val.get(i))) {
val.setWritten(i);
ret = true;
}
}
return ret;
}
/**
* Close all child iterators.
*/
public void close() throws IOException {
for (int i = 0; i < iters.length; ++i) {
iters[i].close();
}
}
/**
* Write the next value into key, value as accepted by the operation
* associated with this set of RecordReaders.
*/
public boolean flush(TupleWritable value) throws IOException {
while (hasNext()) {
value.clearWritten();
if (next(value) && combine(kids, value)) {
return true;
}
}
return false;
}
}
/**
* Return the key for the current join or the value at the top of the
* RecordReader heap.
*/
public K key() {
if (jc.hasNext()) {
return jc.key();
}
if (!q.isEmpty()) {
return q.peek().key();
}
return null;
}
/**
* Clone the key at the top of this RR into the given object.
*/
public void key(K key) throws IOException {
ReflectionUtils.copy(conf, key(), key);
}
public K getCurrentKey() {
return key;
}
/**
* Return true if it is possible that this could emit more values.
*/
public boolean hasNext() {
return jc.hasNext() || !q.isEmpty();
}
/**
* Pass skip key to child RRs.
*/
public void skip(K key) throws IOException, InterruptedException {
ArrayList<ComposableRecordReader<K,?>> tmp =
new ArrayList<ComposableRecordReader<K,?>>();
while (!q.isEmpty() && cmp.compare(q.peek().key(), key) <= 0) {
tmp.add(q.poll());
}
for (ComposableRecordReader<K,?> rr : tmp) {
rr.skip(key);
if (rr.hasNext()) {
q.add(rr);
}
}
}
/**
* Obtain an iterator over the child RRs apropos of the value type
* ultimately emitted from this join.
*/
protected abstract ResetableIterator<X> getDelegate();
/**
* If key provided matches that of this Composite, give JoinCollector
* iterator over values it may emit.
*/
@SuppressWarnings("unchecked") // No values from static EMPTY class
@Override
public void accept(CompositeRecordReader.JoinCollector jc, K key)
throws IOException, InterruptedException {
if (hasNext() && 0 == cmp.compare(key, key())) {
fillJoinCollector(createKey());
jc.add(id, getDelegate());
return;
}
jc.add(id, EMPTY);
}
/**
* For all child RRs offering the key provided, obtain an iterator
* at that position in the JoinCollector.
*/
protected void fillJoinCollector(K iterkey)
throws IOException, InterruptedException {
if (!q.isEmpty()) {
q.peek().key(iterkey);
while (0 == cmp.compare(q.peek().key(), iterkey)) {
ComposableRecordReader<K,?> t = q.poll();
t.accept(jc, iterkey);
if (t.hasNext()) {
q.add(t);
} else if (q.isEmpty()) {
return;
}
}
}
}
/**
* Implement Comparable contract (compare key of join or head of heap
* with that of another).
*/
public int compareTo(ComposableRecordReader<K,?> other) {
return cmp.compare(key(), other.key());
}
/**
* Create a new key common to all child RRs.
* @throws ClassCastException if key classes differ.
*/
@SuppressWarnings("unchecked")
protected K createKey() {
if (keyclass == null || keyclass.equals(NullWritable.class)) {
return (K) NullWritable.get();
}
return (K) ReflectionUtils.newInstance(keyclass, getConf());
}
/**
* Create a value to be used internally for joins.
*/
protected TupleWritable createTupleWritable() {
Writable[] vals = new Writable[kids.length];
for (int i = 0; i < vals.length; ++i) {
vals[i] = kids[i].createValue();
}
return new TupleWritable(vals);
}
/** {@inheritDoc} */
public X getCurrentValue()
throws IOException, InterruptedException {
return value;
}
/**
* Close all child RRs.
*/
public void close() throws IOException {
if (kids != null) {
for (RecordReader<K,? extends Writable> rr : kids) {
rr.close();
}
}
if (jc != null) {
jc.close();
}
}
/**
* Report progress as the minimum of all child RR progress.
*/
public float getProgress() throws IOException, InterruptedException {
float ret = 1.0f;
for (RecordReader<K,? extends Writable> rr : kids) {
ret = Math.min(ret, rr.getProgress());
}
return ret;
}
}
| 14,087 | 27.289157 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/StreamBackedIterator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
/**
* This class provides an implementation of ResetableIterator. This
* implementation uses a byte array to store elements added to it.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class StreamBackedIterator<X extends Writable>
implements ResetableIterator<X> {
private static class ReplayableByteInputStream extends ByteArrayInputStream {
public ReplayableByteInputStream(byte[] arr) {
super(arr);
}
public void resetStream() {
mark = 0;
reset();
}
}
private ByteArrayOutputStream outbuf = new ByteArrayOutputStream();
private DataOutputStream outfbuf = new DataOutputStream(outbuf);
private ReplayableByteInputStream inbuf;
private DataInputStream infbuf;
public StreamBackedIterator() { }
public boolean hasNext() {
return infbuf != null && inbuf.available() > 0;
}
public boolean next(X val) throws IOException {
if (hasNext()) {
inbuf.mark(0);
val.readFields(infbuf);
return true;
}
return false;
}
public boolean replay(X val) throws IOException {
inbuf.reset();
if (0 == inbuf.available())
return false;
val.readFields(infbuf);
return true;
}
public void reset() {
if (null != outfbuf) {
inbuf = new ReplayableByteInputStream(outbuf.toByteArray());
infbuf = new DataInputStream(inbuf);
outfbuf = null;
}
inbuf.resetStream();
}
public void add(X item) throws IOException {
item.write(outfbuf);
}
public void close() throws IOException {
if (null != infbuf)
infbuf.close();
if (null != outfbuf)
outfbuf.close();
}
public void clear() {
if (null != inbuf)
inbuf.resetStream();
outbuf.reset();
outfbuf = new DataOutputStream(outbuf);
}
}
| 2,963 | 27.5 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/OverrideRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import java.io.IOException;
import java.util.ArrayList;
import java.util.PriorityQueue;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.util.ReflectionUtils;
/**
* Prefer the "rightmost" data source for this key.
* For example, <tt>override(S1,S2,S3)</tt> will prefer values
* from S3 over S2, and values from S2 over S1 for all keys
* emitted from all sources.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class OverrideRecordReader<K extends WritableComparable<?>,
V extends Writable>
extends MultiFilterRecordReader<K,V> {
OverrideRecordReader(int id, Configuration conf, int capacity,
Class<? extends WritableComparator> cmpcl) throws IOException {
super(id, conf, capacity, cmpcl);
}
private Class<? extends Writable> valueclass = null;
/**
* Emit the value with the highest position in the tuple.
*/
@SuppressWarnings("unchecked") // No static typeinfo on Tuples
protected V emit(TupleWritable dst) {
return (V) dst.iterator().next();
}
@SuppressWarnings("unchecked") // Explicit check for value class agreement
public V createValue() {
if (null == valueclass) {
Class<?> cls = kids[kids.length -1].createValue().getClass();
for (int i = kids.length -1; cls.equals(NullWritable.class); i--) {
cls = kids[i].createValue().getClass();
}
valueclass = cls.asSubclass(Writable.class);
}
if (valueclass.equals(NullWritable.class)) {
return (V) NullWritable.get();
}
return (V) ReflectionUtils.newInstance(valueclass, null);
}
/**
* Instead of filling the JoinCollector with iterators from all
* data sources, fill only the rightmost for this key.
* This not only saves space by discarding the other sources, but
* it also emits the number of key-value pairs in the preferred
* RecordReader instead of repeating that stream n times, where
* n is the cardinality of the cross product of the discarded
* streams for the given key.
*/
protected void fillJoinCollector(K iterkey)
throws IOException, InterruptedException {
final PriorityQueue<ComposableRecordReader<K,?>> q =
getRecordReaderQueue();
if (q != null && !q.isEmpty()) {
int highpos = -1;
ArrayList<ComposableRecordReader<K,?>> list =
new ArrayList<ComposableRecordReader<K,?>>(kids.length);
q.peek().key(iterkey);
final WritableComparator cmp = getComparator();
while (0 == cmp.compare(q.peek().key(), iterkey)) {
ComposableRecordReader<K,?> t = q.poll();
if (-1 == highpos || list.get(highpos).id() < t.id()) {
highpos = list.size();
}
list.add(t);
if (q.isEmpty())
break;
}
ComposableRecordReader<K,?> t = list.remove(highpos);
t.accept(jc, iterkey);
for (ComposableRecordReader<K,?> rr : list) {
rr.skip(iterkey);
}
list.add(t);
for (ComposableRecordReader<K,?> rr : list) {
if (rr.hasNext()) {
q.add(rr);
}
}
}
}
}
| 4,261 | 35.118644 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/InnerJoinRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
/**
* Full inner join.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class InnerJoinRecordReader<K extends WritableComparable<?>>
extends JoinRecordReader<K> {
InnerJoinRecordReader(int id, Configuration conf, int capacity,
Class<? extends WritableComparator> cmpcl) throws IOException {
super(id, conf, capacity, cmpcl);
}
/**
* Return true iff the tuple is full (all data sources contain this key).
*/
protected boolean combine(Object[] srcs, TupleWritable dst) {
assert srcs.length == dst.size();
for (int i = 0; i < srcs.length; ++i) {
if (!dst.has(i)) {
return false;
}
}
return true;
}
}
| 1,826 | 32.218182 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
* An InputFormat capable of performing joins over a set of data sources sorted
* and partitioned the same way.
*
* A user may define new join types by setting the property
* <tt>mapreduce.join.define.<ident></tt> to a classname.
* In the expression <tt>mapreduce.join.expr</tt>, the identifier will be
* assumed to be a ComposableRecordReader.
* <tt>mapreduce.join.keycomparator</tt> can be a classname used to compare
* keys in the join.
* @see #setFormat
* @see JoinRecordReader
* @see MultiFilterRecordReader
*/
@SuppressWarnings("unchecked")
@InterfaceAudience.Public
@InterfaceStability.Stable
public class CompositeInputFormat<K extends WritableComparable>
extends InputFormat<K, TupleWritable> {
public static final String JOIN_EXPR = "mapreduce.join.expr";
public static final String JOIN_COMPARATOR = "mapreduce.join.keycomparator";
// expression parse tree to which IF requests are proxied
private Parser.Node root;
public CompositeInputFormat() { }
/**
* Interpret a given string as a composite expression.
* {@code
* func ::= <ident>([<func>,]*<func>)
* func ::= tbl(<class>,"<path>")
* class ::= @see java.lang.Class#forName(java.lang.String)
* path ::= @see org.apache.hadoop.fs.Path#Path(java.lang.String)
* }
* Reads expression from the <tt>mapreduce.join.expr</tt> property and
* user-supplied join types from <tt>mapreduce.join.define.<ident></tt>
* types. Paths supplied to <tt>tbl</tt> are given as input paths to the
* InputFormat class listed.
* @see #compose(java.lang.String, java.lang.Class, java.lang.String...)
*/
public void setFormat(Configuration conf) throws IOException {
addDefaults();
addUserIdentifiers(conf);
root = Parser.parse(conf.get(JOIN_EXPR, null), conf);
}
/**
* Adds the default set of identifiers to the parser.
*/
protected void addDefaults() {
try {
Parser.CNode.addIdentifier("inner", InnerJoinRecordReader.class);
Parser.CNode.addIdentifier("outer", OuterJoinRecordReader.class);
Parser.CNode.addIdentifier("override", OverrideRecordReader.class);
Parser.WNode.addIdentifier("tbl", WrappedRecordReader.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException("FATAL: Failed to init defaults", e);
}
}
/**
* Inform the parser of user-defined types.
*/
private void addUserIdentifiers(Configuration conf) throws IOException {
Pattern x = Pattern.compile("^mapreduce\\.join\\.define\\.(\\w+)$");
for (Map.Entry<String,String> kv : conf) {
Matcher m = x.matcher(kv.getKey());
if (m.matches()) {
try {
Parser.CNode.addIdentifier(m.group(1),
conf.getClass(m.group(0), null, ComposableRecordReader.class));
} catch (NoSuchMethodException e) {
throw new IOException("Invalid define for " + m.group(1), e);
}
}
}
}
/**
* Build a CompositeInputSplit from the child InputFormats by assigning the
* ith split from each child to the ith composite split.
*/
@SuppressWarnings("unchecked")
public List<InputSplit> getSplits(JobContext job)
throws IOException, InterruptedException {
setFormat(job.getConfiguration());
job.getConfiguration().setLong("mapreduce.input.fileinputformat.split.minsize", Long.MAX_VALUE);
return root.getSplits(job);
}
/**
* Construct a CompositeRecordReader for the children of this InputFormat
* as defined in the init expression.
* The outermost join need only be composable, not necessarily a composite.
* Mandating TupleWritable isn't strictly correct.
*/
@SuppressWarnings("unchecked") // child types unknown
public RecordReader<K,TupleWritable> createRecordReader(InputSplit split,
TaskAttemptContext taskContext)
throws IOException, InterruptedException {
setFormat(taskContext.getConfiguration());
return root.createRecordReader(split, taskContext);
}
/**
* Convenience method for constructing composite formats.
* Given InputFormat class (inf), path (p) return:
* {@code tbl(<inf>, <p>) }
*/
public static String compose(Class<? extends InputFormat> inf,
String path) {
return compose(inf.getName().intern(), path,
new StringBuffer()).toString();
}
/**
* Convenience method for constructing composite formats.
* Given operation (op), Object class (inf), set of paths (p) return:
* {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }
*/
public static String compose(String op,
Class<? extends InputFormat> inf, String... path) {
final String infname = inf.getName();
StringBuffer ret = new StringBuffer(op + '(');
for (String p : path) {
compose(infname, p, ret);
ret.append(',');
}
ret.setCharAt(ret.length() - 1, ')');
return ret.toString();
}
/**
* Convenience method for constructing composite formats.
* Given operation (op), Object class (inf), set of paths (p) return:
* {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }
*/
public static String compose(String op,
Class<? extends InputFormat> inf, Path... path) {
ArrayList<String> tmp = new ArrayList<String>(path.length);
for (Path p : path) {
tmp.add(p.toString());
}
return compose(op, inf, tmp.toArray(new String[0]));
}
private static StringBuffer compose(String inf, String path,
StringBuffer sb) {
sb.append("tbl(" + inf + ",\"");
sb.append(path);
sb.append("\")");
return sb;
}
}
| 7,117 | 35.316327 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/ComposableInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
* Refinement of InputFormat requiring implementors to provide
* ComposableRecordReader instead of RecordReader.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class ComposableInputFormat<K extends WritableComparable<?>,
V extends Writable>
extends InputFormat<K,V> {
public abstract ComposableRecordReader<K,V> createRecordReader(
InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException;
}
| 1,754 | 37.152174 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/OuterJoinRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
/**
* Full outer join.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class OuterJoinRecordReader<K extends WritableComparable<?>>
extends JoinRecordReader<K> {
OuterJoinRecordReader(int id, Configuration conf, int capacity,
Class<? extends WritableComparator> cmpcl) throws IOException {
super(id, conf, capacity, cmpcl);
}
/**
* Emit everything from the collector.
*/
protected boolean combine(Object[] srcs, TupleWritable dst) {
assert srcs.length == dst.size();
return true;
}
}
| 1,686 | 32.74 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/ComposableRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.RecordReader;
/**
* Additional operations required of a RecordReader to participate in a join.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class ComposableRecordReader<K extends WritableComparable<?>,
V extends Writable>
extends RecordReader<K,V>
implements Comparable<ComposableRecordReader<K,?>> {
/**
* Return the position in the collector this class occupies.
*/
abstract int id();
/**
* Return the key this RecordReader would supply on a call to next(K,V)
*/
abstract K key();
/**
* Clone the key at the head of this RecordReader into the object provided.
*/
abstract void key(K key) throws IOException;
/**
* Create instance of key.
*/
abstract K createKey();
/**
* Create instance of value.
*/
abstract V createValue();
/**
* Returns true if the stream is not empty, but provides no guarantee that
* a call to next(K,V) will succeed.
*/
abstract boolean hasNext();
/**
* Skip key-value pairs with keys less than or equal to the key provided.
*/
abstract void skip(K key) throws IOException, InterruptedException;
/**
* While key-value pairs from this RecordReader match the given key, register
* them with the JoinCollector provided.
*/
@SuppressWarnings("unchecked")
abstract void accept(CompositeRecordReader.JoinCollector jc, K key)
throws IOException, InterruptedException;
}
| 2,602 | 30.361446 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/ArrayListBackedIterator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.util.ReflectionUtils;
/**
* This class provides an implementation of ResetableIterator. The
* implementation uses an {@link java.util.ArrayList} to store elements
* added to it, replaying them as requested.
* Prefer {@link StreamBackedIterator}.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ArrayListBackedIterator<X extends Writable>
implements ResetableIterator<X> {
private Iterator<X> iter;
private ArrayList<X> data;
private X hold = null;
private Configuration conf = new Configuration();
public ArrayListBackedIterator() {
this(new ArrayList<X>());
}
public ArrayListBackedIterator(ArrayList<X> data) {
this.data = data;
this.iter = this.data.iterator();
}
public boolean hasNext() {
return iter.hasNext();
}
public boolean next(X val) throws IOException {
if (iter.hasNext()) {
ReflectionUtils.copy(conf, iter.next(), val);
if (null == hold) {
hold = WritableUtils.clone(val, null);
} else {
ReflectionUtils.copy(conf, val, hold);
}
return true;
}
return false;
}
public boolean replay(X val) throws IOException {
ReflectionUtils.copy(conf, hold, val);
return true;
}
public void reset() {
iter = data.iterator();
}
public void add(X item) throws IOException {
data.add(WritableUtils.clone(item, null));
}
public void close() throws IOException {
iter = null;
data = null;
}
public void clear() {
data.clear();
reset();
}
}
| 2,734 | 27.489583 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/Parser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import java.io.CharArrayReader;
import java.io.IOException;
import java.io.StreamTokenizer;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.Stack;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.StatusReporter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.task.JobContextImpl;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.hadoop.util.ReflectionUtils;
/**
* Very simple shift-reduce parser for join expressions.
*
* This should be sufficient for the user extension permitted now, but ought to
* be replaced with a parser generator if more complex grammars are supported.
* In particular, this "shift-reduce" parser has no states. Each set
* of formals requires a different internal node type, which is responsible for
* interpreting the list of tokens it receives. This is sufficient for the
* current grammar, but it has several annoying properties that might inhibit
* extension. In particular, parenthesis are always function calls; an
* algebraic or filter grammar would not only require a node type, but must
* also work around the internals of this parser.
*
* For most other cases, adding classes to the hierarchy- particularly by
* extending JoinRecordReader and MultiFilterRecordReader- is fairly
* straightforward. One need only override the relevant method(s) (usually only
* {@link CompositeRecordReader#combine}) and include a property to map its
* value to an identifier in the parser.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class Parser {
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum TType { CIF, IDENT, COMMA, LPAREN, RPAREN, QUOT, NUM, }
/**
* Tagged-union type for tokens from the join expression.
* @see Parser.TType
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static class Token {
private TType type;
Token(TType type) {
this.type = type;
}
public TType getType() { return type; }
public Node getNode() throws IOException {
throw new IOException("Expected nodetype");
}
public double getNum() throws IOException {
throw new IOException("Expected numtype");
}
public String getStr() throws IOException {
throw new IOException("Expected strtype");
}
}
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static class NumToken extends Token {
private double num;
public NumToken(double num) {
super(TType.NUM);
this.num = num;
}
public double getNum() { return num; }
}
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static class NodeToken extends Token {
private Node node;
NodeToken(Node node) {
super(TType.CIF);
this.node = node;
}
public Node getNode() {
return node;
}
}
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static class StrToken extends Token {
private String str;
public StrToken(TType type, String str) {
super(type);
this.str = str;
}
public String getStr() {
return str;
}
}
/**
* Simple lexer wrapping a StreamTokenizer.
* This encapsulates the creation of tagged-union Tokens and initializes the
* SteamTokenizer.
*/
private static class Lexer {
private StreamTokenizer tok;
Lexer(String s) {
tok = new StreamTokenizer(new CharArrayReader(s.toCharArray()));
tok.quoteChar('"');
tok.parseNumbers();
tok.ordinaryChar(',');
tok.ordinaryChar('(');
tok.ordinaryChar(')');
tok.wordChars('$','$');
tok.wordChars('_','_');
}
Token next() throws IOException {
int type = tok.nextToken();
switch (type) {
case StreamTokenizer.TT_EOF:
case StreamTokenizer.TT_EOL:
return null;
case StreamTokenizer.TT_NUMBER:
return new NumToken(tok.nval);
case StreamTokenizer.TT_WORD:
return new StrToken(TType.IDENT, tok.sval);
case '"':
return new StrToken(TType.QUOT, tok.sval);
default:
switch (type) {
case ',':
return new Token(TType.COMMA);
case '(':
return new Token(TType.LPAREN);
case ')':
return new Token(TType.RPAREN);
default:
throw new IOException("Unexpected: " + type);
}
}
}
}
@SuppressWarnings("unchecked")
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract static class Node extends ComposableInputFormat {
/**
* Return the node type registered for the particular identifier.
* By default, this is a CNode for any composite node and a WNode
* for "wrapped" nodes. User nodes will likely be composite
* nodes.
* @see #addIdentifier(java.lang.String, java.lang.Class[], java.lang.Class, java.lang.Class)
* @see CompositeInputFormat#setFormat(org.apache.hadoop.mapred.JobConf)
*/
static Node forIdent(String ident) throws IOException {
try {
if (!nodeCstrMap.containsKey(ident)) {
throw new IOException("No nodetype for " + ident);
}
return nodeCstrMap.get(ident).newInstance(ident);
} catch (IllegalAccessException e) {
throw new IOException(e);
} catch (InstantiationException e) {
throw new IOException(e);
} catch (InvocationTargetException e) {
throw new IOException(e);
}
}
private static final Class<?>[] ncstrSig = { String.class };
private static final
Map<String,Constructor<? extends Node>> nodeCstrMap =
new HashMap<String,Constructor<? extends Node>>();
protected static final Map<String,Constructor<? extends
ComposableRecordReader>> rrCstrMap =
new HashMap<String,Constructor<? extends ComposableRecordReader>>();
/**
* For a given identifier, add a mapping to the nodetype for the parse
* tree and to the ComposableRecordReader to be created, including the
* formals required to invoke the constructor.
* The nodetype and constructor signature should be filled in from the
* child node.
*/
protected static void addIdentifier(String ident, Class<?>[] mcstrSig,
Class<? extends Node> nodetype,
Class<? extends ComposableRecordReader> cl)
throws NoSuchMethodException {
Constructor<? extends Node> ncstr =
nodetype.getDeclaredConstructor(ncstrSig);
ncstr.setAccessible(true);
nodeCstrMap.put(ident, ncstr);
Constructor<? extends ComposableRecordReader> mcstr =
cl.getDeclaredConstructor(mcstrSig);
mcstr.setAccessible(true);
rrCstrMap.put(ident, mcstr);
}
// inst
protected int id = -1;
protected String ident;
protected Class<? extends WritableComparator> cmpcl;
protected Node(String ident) {
this.ident = ident;
}
protected void setID(int id) {
this.id = id;
}
protected void setKeyComparator(
Class<? extends WritableComparator> cmpcl) {
this.cmpcl = cmpcl;
}
abstract void parse(List<Token> args, Configuration conf)
throws IOException;
}
/**
* Nodetype in the parse tree for "wrapped" InputFormats.
*/
static class WNode extends Node {
private static final Class<?>[] cstrSig =
{ Integer.TYPE, RecordReader.class, Class.class };
@SuppressWarnings("unchecked")
static void addIdentifier(String ident,
Class<? extends ComposableRecordReader> cl)
throws NoSuchMethodException {
Node.addIdentifier(ident, cstrSig, WNode.class, cl);
}
private String indir;
private InputFormat<?, ?> inf;
public WNode(String ident) {
super(ident);
}
/**
* Let the first actual define the InputFormat and the second define
* the <tt>mapred.input.dir</tt> property.
*/
@Override
public void parse(List<Token> ll, Configuration conf) throws IOException {
StringBuilder sb = new StringBuilder();
Iterator<Token> i = ll.iterator();
while (i.hasNext()) {
Token t = i.next();
if (TType.COMMA.equals(t.getType())) {
try {
inf = (InputFormat<?, ?>)ReflectionUtils.newInstance(
conf.getClassByName(sb.toString()), conf);
} catch (ClassNotFoundException e) {
throw new IOException(e);
} catch (IllegalArgumentException e) {
throw new IOException(e);
}
break;
}
sb.append(t.getStr());
}
if (!i.hasNext()) {
throw new IOException("Parse error");
}
Token t = i.next();
if (!TType.QUOT.equals(t.getType())) {
throw new IOException("Expected quoted string");
}
indir = t.getStr();
// no check for ll.isEmpty() to permit extension
}
private Configuration getConf(Configuration jconf) throws IOException {
Job job = Job.getInstance(jconf);
FileInputFormat.setInputPaths(job, indir);
return job.getConfiguration();
}
public List<InputSplit> getSplits(JobContext context)
throws IOException, InterruptedException {
return inf.getSplits(
new JobContextImpl(getConf(context.getConfiguration()),
context.getJobID()));
}
public ComposableRecordReader<?, ?> createRecordReader(InputSplit split,
TaskAttemptContext taskContext)
throws IOException, InterruptedException {
try {
if (!rrCstrMap.containsKey(ident)) {
throw new IOException("No RecordReader for " + ident);
}
Configuration conf = getConf(taskContext.getConfiguration());
TaskAttemptContext context =
new TaskAttemptContextImpl(conf,
TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID)),
new WrappedStatusReporter(taskContext));
return rrCstrMap.get(ident).newInstance(id,
inf.createRecordReader(split, context), cmpcl);
} catch (IllegalAccessException e) {
throw new IOException(e);
} catch (InstantiationException e) {
throw new IOException(e);
} catch (InvocationTargetException e) {
throw new IOException(e);
}
}
public String toString() {
return ident + "(" + inf.getClass().getName() + ",\"" + indir + "\")";
}
}
private static class WrappedStatusReporter extends StatusReporter {
TaskAttemptContext context;
public WrappedStatusReporter(TaskAttemptContext context) {
this.context = context;
}
@Override
public Counter getCounter(Enum<?> name) {
return context.getCounter(name);
}
@Override
public Counter getCounter(String group, String name) {
return context.getCounter(group, name);
}
@Override
public void progress() {
context.progress();
}
@Override
public float getProgress() {
return context.getProgress();
}
@Override
public void setStatus(String status) {
context.setStatus(status);
}
}
/**
* Internal nodetype for "composite" InputFormats.
*/
static class CNode extends Node {
private static final Class<?>[] cstrSig =
{ Integer.TYPE, Configuration.class, Integer.TYPE, Class.class };
@SuppressWarnings("unchecked")
static void addIdentifier(String ident,
Class<? extends ComposableRecordReader> cl)
throws NoSuchMethodException {
Node.addIdentifier(ident, cstrSig, CNode.class, cl);
}
// inst
private ArrayList<Node> kids = new ArrayList<Node>();
public CNode(String ident) {
super(ident);
}
@Override
public void setKeyComparator(Class<? extends WritableComparator> cmpcl) {
super.setKeyComparator(cmpcl);
for (Node n : kids) {
n.setKeyComparator(cmpcl);
}
}
/**
* Combine InputSplits from child InputFormats into a
* {@link CompositeInputSplit}.
*/
@SuppressWarnings("unchecked")
public List<InputSplit> getSplits(JobContext job)
throws IOException, InterruptedException {
List<List<InputSplit>> splits =
new ArrayList<List<InputSplit>>(kids.size());
for (int i = 0; i < kids.size(); ++i) {
List<InputSplit> tmp = kids.get(i).getSplits(job);
if (null == tmp) {
throw new IOException("Error gathering splits from child RReader");
}
if (i > 0 && splits.get(i-1).size() != tmp.size()) {
throw new IOException("Inconsistent split cardinality from child " +
i + " (" + splits.get(i-1).size() + "/" + tmp.size() + ")");
}
splits.add(i, tmp);
}
final int size = splits.get(0).size();
List<InputSplit> ret = new ArrayList<InputSplit>();
for (int i = 0; i < size; ++i) {
CompositeInputSplit split = new CompositeInputSplit(splits.size());
for (int j = 0; j < splits.size(); ++j) {
split.add(splits.get(j).get(i));
}
ret.add(split);
}
return ret;
}
@SuppressWarnings("unchecked") // child types unknowable
public ComposableRecordReader
createRecordReader(InputSplit split, TaskAttemptContext taskContext)
throws IOException, InterruptedException {
if (!(split instanceof CompositeInputSplit)) {
throw new IOException("Invalid split type:" +
split.getClass().getName());
}
final CompositeInputSplit spl = (CompositeInputSplit)split;
final int capacity = kids.size();
CompositeRecordReader ret = null;
try {
if (!rrCstrMap.containsKey(ident)) {
throw new IOException("No RecordReader for " + ident);
}
ret = (CompositeRecordReader)rrCstrMap.get(ident).
newInstance(id, taskContext.getConfiguration(), capacity, cmpcl);
} catch (IllegalAccessException e) {
throw new IOException(e);
} catch (InstantiationException e) {
throw new IOException(e);
} catch (InvocationTargetException e) {
throw new IOException(e);
}
for (int i = 0; i < capacity; ++i) {
ret.add(kids.get(i).createRecordReader(spl.get(i), taskContext));
}
return (ComposableRecordReader)ret;
}
/**
* Parse a list of comma-separated nodes.
*/
public void parse(List<Token> args, Configuration conf)
throws IOException {
ListIterator<Token> i = args.listIterator();
while (i.hasNext()) {
Token t = i.next();
t.getNode().setID(i.previousIndex() >> 1);
kids.add(t.getNode());
if (i.hasNext() && !TType.COMMA.equals(i.next().getType())) {
throw new IOException("Expected ','");
}
}
}
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(ident + "(");
for (Node n : kids) {
sb.append(n.toString() + ",");
}
sb.setCharAt(sb.length() - 1, ')');
return sb.toString();
}
}
private static Token reduce(Stack<Token> st, Configuration conf)
throws IOException {
LinkedList<Token> args = new LinkedList<Token>();
while (!st.isEmpty() && !TType.LPAREN.equals(st.peek().getType())) {
args.addFirst(st.pop());
}
if (st.isEmpty()) {
throw new IOException("Unmatched ')'");
}
st.pop();
if (st.isEmpty() || !TType.IDENT.equals(st.peek().getType())) {
throw new IOException("Identifier expected");
}
Node n = Node.forIdent(st.pop().getStr());
n.parse(args, conf);
return new NodeToken(n);
}
/**
* Given an expression and an optional comparator, build a tree of
* InputFormats using the comparator to sort keys.
*/
static Node parse(String expr, Configuration conf) throws IOException {
if (null == expr) {
throw new IOException("Expression is null");
}
Class<? extends WritableComparator> cmpcl = conf.getClass(
CompositeInputFormat.JOIN_COMPARATOR, null, WritableComparator.class);
Lexer lex = new Lexer(expr);
Stack<Token> st = new Stack<Token>();
Token tok;
while ((tok = lex.next()) != null) {
if (TType.RPAREN.equals(tok.getType())) {
st.push(reduce(st, conf));
} else {
st.push(tok);
}
}
if (st.size() == 1 && TType.CIF.equals(st.peek().getType())) {
Node ret = st.pop().getNode();
if (cmpcl != null) {
ret.setKeyComparator(cmpcl);
}
return ret;
}
throw new IOException("Missing ')'");
}
}
| 18,561 | 31.737213 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/JoinRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import java.io.IOException;
import java.util.PriorityQueue;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.util.ReflectionUtils;
/**
* Base class for Composite joins returning Tuples of arbitrary Writables.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class JoinRecordReader<K extends WritableComparable<?>>
extends CompositeRecordReader<K,Writable,TupleWritable> {
public JoinRecordReader(int id, Configuration conf, int capacity,
Class<? extends WritableComparator> cmpcl) throws IOException {
super(id, capacity, cmpcl);
setConf(conf);
}
/**
* Emit the next set of key, value pairs as defined by the child
* RecordReaders and operation associated with this composite RR.
*/
public boolean nextKeyValue()
throws IOException, InterruptedException {
if (key == null) {
key = createKey();
}
if (jc.flush(value)) {
ReflectionUtils.copy(conf, jc.key(), key);
return true;
}
jc.clear();
if (value == null) {
value = createValue();
}
final PriorityQueue<ComposableRecordReader<K,?>> q =
getRecordReaderQueue();
K iterkey = createKey();
while (q != null && !q.isEmpty()) {
fillJoinCollector(iterkey);
jc.reset(iterkey);
if (jc.flush(value)) {
ReflectionUtils.copy(conf, jc.key(), key);
return true;
}
jc.clear();
}
return false;
}
public TupleWritable createValue() {
return createTupleWritable();
}
/**
* Return an iterator wrapping the JoinCollector.
*/
protected ResetableIterator<TupleWritable> getDelegate() {
return new JoinDelegationIterator();
}
/**
* Since the JoinCollector is effecting our operation, we need only
* provide an iterator proxy wrapping its operation.
*/
protected class JoinDelegationIterator
implements ResetableIterator<TupleWritable> {
public boolean hasNext() {
return jc.hasNext();
}
public boolean next(TupleWritable val) throws IOException {
return jc.flush(val);
}
public boolean replay(TupleWritable val) throws IOException {
return jc.replay(val);
}
public void reset() {
jc.reset(jc.key());
}
public void add(TupleWritable item) throws IOException {
throw new UnsupportedOperationException();
}
public void close() throws IOException {
jc.close();
}
public void clear() {
jc.clear();
}
}
}
| 3,613 | 27.912 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/TupleWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import java.io.DataOutput;
import java.io.DataInput;
import java.io.IOException;
import java.util.BitSet;
import java.util.Iterator;
import java.util.NoSuchElementException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
/**
* Writable type storing multiple {@link org.apache.hadoop.io.Writable}s.
*
* This is *not* a general-purpose tuple type. In almost all cases, users are
* encouraged to implement their own serializable types, which can perform
* better validation and provide more efficient encodings than this class is
* capable. TupleWritable relies on the join framework for type safety and
* assumes its instances will rarely be persisted, assumptions not only
* incompatible with, but contrary to the general case.
*
* @see org.apache.hadoop.io.Writable
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TupleWritable implements Writable, Iterable<Writable> {
protected BitSet written;
private Writable[] values;
/**
* Create an empty tuple with no allocated storage for writables.
*/
public TupleWritable() {
written = new BitSet(0);
}
/**
* Initialize tuple with storage; unknown whether any of them contain
* "written" values.
*/
public TupleWritable(Writable[] vals) {
written = new BitSet(vals.length);
values = vals;
}
/**
* Return true if tuple has an element at the position provided.
*/
public boolean has(int i) {
return written.get(i);
}
/**
* Get ith Writable from Tuple.
*/
public Writable get(int i) {
return values[i];
}
/**
* The number of children in this Tuple.
*/
public int size() {
return values.length;
}
/**
* {@inheritDoc}
*/
public boolean equals(Object other) {
if (other instanceof TupleWritable) {
TupleWritable that = (TupleWritable)other;
if (!this.written.equals(that.written)) {
return false;
}
for (int i = 0; i < values.length; ++i) {
if (!has(i)) continue;
if (!values[i].equals(that.get(i))) {
return false;
}
}
return true;
}
return false;
}
public int hashCode() {
assert false : "hashCode not designed";
return written.hashCode();
}
/**
* Return an iterator over the elements in this tuple.
* Note that this doesn't flatten the tuple; one may receive tuples
* from this iterator.
*/
public Iterator<Writable> iterator() {
final TupleWritable t = this;
return new Iterator<Writable>() {
int bitIndex = written.nextSetBit(0);
public boolean hasNext() {
return bitIndex >= 0;
}
public Writable next() {
int returnIndex = bitIndex;
if (returnIndex < 0)
throw new NoSuchElementException();
bitIndex = written.nextSetBit(bitIndex+1);
return t.get(returnIndex);
}
public void remove() {
if (!written.get(bitIndex)) {
throw new IllegalStateException(
"Attempt to remove non-existent val");
}
written.clear(bitIndex);
}
};
}
/**
* Convert Tuple to String as in the following.
* <tt>[<child1>,<child2>,...,<childn>]</tt>
*/
public String toString() {
StringBuffer buf = new StringBuffer("[");
for (int i = 0; i < values.length; ++i) {
buf.append(has(i) ? values[i].toString() : "");
buf.append(",");
}
if (values.length != 0)
buf.setCharAt(buf.length() - 1, ']');
else
buf.append(']');
return buf.toString();
}
// Writable
/** Writes each Writable to <code>out</code>.
* TupleWritable format:
* {@code
* <count><type1><type2>...<typen><obj1><obj2>...<objn>
* }
*/
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, values.length);
writeBitSet(out, values.length, written);
for (int i = 0; i < values.length; ++i) {
Text.writeString(out, values[i].getClass().getName());
}
for (int i = 0; i < values.length; ++i) {
if (has(i)) {
values[i].write(out);
}
}
}
/**
* {@inheritDoc}
*/
@SuppressWarnings("unchecked") // No static typeinfo on Tuples
public void readFields(DataInput in) throws IOException {
int card = WritableUtils.readVInt(in);
values = new Writable[card];
readBitSet(in, card, written);
Class<? extends Writable>[] cls = new Class[card];
try {
for (int i = 0; i < card; ++i) {
cls[i] = Class.forName(Text.readString(in)).asSubclass(Writable.class);
}
for (int i = 0; i < card; ++i) {
if (cls[i].equals(NullWritable.class)) {
values[i] = NullWritable.get();
} else {
values[i] = cls[i].newInstance();
}
if (has(i)) {
values[i].readFields(in);
}
}
} catch (ClassNotFoundException e) {
throw new IOException("Failed tuple init", e);
} catch (IllegalAccessException e) {
throw new IOException("Failed tuple init", e);
} catch (InstantiationException e) {
throw new IOException("Failed tuple init", e);
}
}
/**
* Record that the tuple contains an element at the position provided.
*/
void setWritten(int i) {
written.set(i);
}
/**
* Record that the tuple does not contain an element at the position
* provided.
*/
void clearWritten(int i) {
written.clear(i);
}
/**
* Clear any record of which writables have been written to, without
* releasing storage.
*/
void clearWritten() {
written.clear();
}
/**
* Writes the bit set to the stream. The first 64 bit-positions of the bit
* set are written as a VLong for backwards-compatibility with older
* versions of TupleWritable. All bit-positions >= 64 are encoded as a byte
* for every 8 bit-positions.
*/
private static final void writeBitSet(DataOutput stream, int nbits,
BitSet bitSet) throws IOException {
long bits = 0L;
int bitSetIndex = bitSet.nextSetBit(0);
for (;bitSetIndex >= 0 && bitSetIndex < Long.SIZE;
bitSetIndex=bitSet.nextSetBit(bitSetIndex+1)) {
bits |= 1L << bitSetIndex;
}
WritableUtils.writeVLong(stream,bits);
if (nbits > Long.SIZE) {
bits = 0L;
for (int lastWordWritten = 0; bitSetIndex >= 0 && bitSetIndex < nbits;
bitSetIndex = bitSet.nextSetBit(bitSetIndex+1)) {
int bitsIndex = bitSetIndex % Byte.SIZE;
int word = (bitSetIndex-Long.SIZE) / Byte.SIZE;
if (word > lastWordWritten) {
stream.writeByte((byte)bits);
bits = 0L;
for (lastWordWritten++;lastWordWritten<word;lastWordWritten++) {
stream.writeByte((byte)bits);
}
}
bits |= 1L << bitsIndex;
}
stream.writeByte((byte)bits);
}
}
/**
* Reads a bitset from the stream that has been written with
* {@link #writeBitSet(DataOutput, int, BitSet)}.
*/
private static final void readBitSet(DataInput stream, int nbits,
BitSet bitSet) throws IOException {
bitSet.clear();
long initialBits = WritableUtils.readVLong(stream);
long last = 0L;
while (0L != initialBits) {
last = Long.lowestOneBit(initialBits);
initialBits ^= last;
bitSet.set(Long.numberOfTrailingZeros(last));
}
for (int offset=Long.SIZE; offset < nbits; offset+=Byte.SIZE) {
byte bits = stream.readByte();
while (0 != bits) {
last = Long.lowestOneBit(bits);
bits ^= last;
bitSet.set(Long.numberOfTrailingZeros(last) + offset);
}
}
}
}
| 8,728 | 28.19398 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/WrappedRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.util.ReflectionUtils;
/**
* Proxy class for a RecordReader participating in the join framework.
*
* This class keeps track of the "head" key-value pair for the
* provided RecordReader and keeps a store of values matching a key when
* this source is participating in a join.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class WrappedRecordReader<K extends WritableComparable<?>,
U extends Writable> extends ComposableRecordReader<K,U> {
protected boolean empty = false;
private RecordReader<K,U> rr;
private int id; // index at which values will be inserted in collector
protected WritableComparator cmp = null;
private K key; // key at the top of this RR
private U value; // value assoc with key
private ResetableIterator<U> vjoin;
private Configuration conf = new Configuration();
@SuppressWarnings("unchecked")
private Class<? extends WritableComparable> keyclass = null;
private Class<? extends Writable> valueclass = null;
protected WrappedRecordReader(int id) {
this.id = id;
vjoin = new StreamBackedIterator<U>();
}
/**
* For a given RecordReader rr, occupy position id in collector.
*/
WrappedRecordReader(int id, RecordReader<K,U> rr,
Class<? extends WritableComparator> cmpcl)
throws IOException, InterruptedException {
this.id = id;
this.rr = rr;
if (cmpcl != null) {
try {
this.cmp = cmpcl.newInstance();
} catch (InstantiationException e) {
throw new IOException(e);
} catch (IllegalAccessException e) {
throw new IOException(e);
}
}
vjoin = new StreamBackedIterator<U>();
}
public void initialize(InputSplit split,
TaskAttemptContext context)
throws IOException, InterruptedException {
rr.initialize(split, context);
conf = context.getConfiguration();
nextKeyValue();
if (!empty) {
keyclass = key.getClass().asSubclass(WritableComparable.class);
valueclass = value.getClass();
if (cmp == null) {
cmp = WritableComparator.get(keyclass, conf);
}
}
}
/**
* Request new key from proxied RR.
*/
@SuppressWarnings("unchecked")
public K createKey() {
if (keyclass != null) {
return (K) ReflectionUtils.newInstance(keyclass, conf);
}
return (K) NullWritable.get();
}
@SuppressWarnings("unchecked")
public U createValue() {
if (valueclass != null) {
return (U) ReflectionUtils.newInstance(valueclass, conf);
}
return (U) NullWritable.get();
}
/** {@inheritDoc} */
public int id() {
return id;
}
/**
* Return the key at the head of this RR.
*/
public K key() {
return key;
}
/**
* Clone the key at the head of this RR into the object supplied.
*/
public void key(K qkey) throws IOException {
ReflectionUtils.copy(conf, key, qkey);
}
/**
* Return true if the RR- including the k,v pair stored in this object-
* is exhausted.
*/
public boolean hasNext() {
return !empty;
}
/**
* Skip key-value pairs with keys less than or equal to the key provided.
*/
public void skip(K key) throws IOException, InterruptedException {
if (hasNext()) {
while (cmp.compare(key(), key) <= 0 && next());
}
}
/**
* Add an iterator to the collector at the position occupied by this
* RecordReader over the values in this stream paired with the key
* provided (ie register a stream of values from this source matching K
* with a collector).
*/
@SuppressWarnings("unchecked")
public void accept(CompositeRecordReader.JoinCollector i, K key)
throws IOException, InterruptedException {
vjoin.clear();
if (key() != null && 0 == cmp.compare(key, key())) {
do {
vjoin.add(value);
} while (next() && 0 == cmp.compare(key, key()));
}
i.add(id, vjoin);
}
/**
* Read the next k,v pair into the head of this object; return true iff
* the RR and this are exhausted.
*/
public boolean nextKeyValue() throws IOException, InterruptedException {
if (hasNext()) {
next();
return true;
}
return false;
}
/**
* Read the next k,v pair into the head of this object; return true iff
* the RR and this are exhausted.
*/
private boolean next() throws IOException, InterruptedException {
empty = !rr.nextKeyValue();
key = rr.getCurrentKey();
value = rr.getCurrentValue();
return !empty;
}
/**
* Get current key
*/
public K getCurrentKey() throws IOException, InterruptedException {
return rr.getCurrentKey();
}
/**
* Get current value
*/
public U getCurrentValue() throws IOException, InterruptedException {
return rr.getCurrentValue();
}
/**
* Request progress from proxied RR.
*/
public float getProgress() throws IOException, InterruptedException {
return rr.getProgress();
}
/**
* Forward close request to proxied RR.
*/
public void close() throws IOException {
rr.close();
}
/**
* Implement Comparable contract (compare key at head of proxied RR
* with that of another).
*/
public int compareTo(ComposableRecordReader<K,?> other) {
return cmp.compare(key(), other.key());
}
/**
* Return true iff compareTo(other) retn true.
*/
@SuppressWarnings("unchecked") // Explicit type check prior to cast
public boolean equals(Object other) {
return other instanceof ComposableRecordReader
&& 0 == compareTo((ComposableRecordReader)other);
}
public int hashCode() {
assert false : "hashCode not designed";
return 42;
}
}
| 7,049 | 27.658537 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/MultiFilterRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import java.io.IOException;
import java.util.PriorityQueue;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.util.ReflectionUtils;
/**
* Base class for Composite join returning values derived from multiple
* sources, but generally not tuples.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class MultiFilterRecordReader<K extends WritableComparable<?>,
V extends Writable>
extends CompositeRecordReader<K,V,V> {
private TupleWritable ivalue = null;
public MultiFilterRecordReader(int id, Configuration conf, int capacity,
Class<? extends WritableComparator> cmpcl) throws IOException {
super(id, capacity, cmpcl);
setConf(conf);
}
/**
* For each tuple emitted, return a value (typically one of the values
* in the tuple).
* Modifying the Writables in the tuple is permitted and unlikely to affect
* join behavior in most cases, but it is not recommended. It's safer to
* clone first.
*/
protected abstract V emit(TupleWritable dst) throws IOException;
/**
* Default implementation offers {@link #emit} every Tuple from the
* collector (the outer join of child RRs).
*/
protected boolean combine(Object[] srcs, TupleWritable dst) {
return true;
}
/** {@inheritDoc} */
public boolean nextKeyValue() throws IOException, InterruptedException {
if (key == null) {
key = createKey();
}
if (value == null) {
value = createValue();
}
if (jc.flush(ivalue)) {
ReflectionUtils.copy(conf, jc.key(), key);
ReflectionUtils.copy(conf, emit(ivalue), value);
return true;
}
if (ivalue == null) {
ivalue = createTupleWritable();
}
jc.clear();
final PriorityQueue<ComposableRecordReader<K,?>> q =
getRecordReaderQueue();
K iterkey = createKey();
while (q != null && !q.isEmpty()) {
fillJoinCollector(iterkey);
jc.reset(iterkey);
if (jc.flush(ivalue)) {
ReflectionUtils.copy(conf, jc.key(), key);
ReflectionUtils.copy(conf, emit(ivalue), value);
return true;
}
jc.clear();
}
return false;
}
@SuppressWarnings("unchecked")
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
super.initialize(split, context);
}
/**
* Return an iterator returning a single value from the tuple.
* @see MultiFilterDelegationIterator
*/
protected ResetableIterator<V> getDelegate() {
return new MultiFilterDelegationIterator();
}
/**
* Proxy the JoinCollector, but include callback to emit.
*/
protected class MultiFilterDelegationIterator
implements ResetableIterator<V> {
public boolean hasNext() {
return jc.hasNext();
}
public boolean next(V val) throws IOException {
boolean ret;
if (ret = jc.flush(ivalue)) {
ReflectionUtils.copy(getConf(), emit(ivalue), val);
}
return ret;
}
public boolean replay(V val) throws IOException {
ReflectionUtils.copy(getConf(), emit(ivalue), val);
return true;
}
public void reset() {
jc.reset(jc.key());
}
public void add(V item) throws IOException {
throw new UnsupportedOperationException();
}
public void close() throws IOException {
jc.close();
}
public void clear() {
jc.clear();
}
}
}
| 4,685 | 28.847134 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/ResetableIterator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
/**
* This defines an interface to a stateful Iterator that can replay elements
* added to it directly.
* Note that this does not extend {@link java.util.Iterator}.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface ResetableIterator<T extends Writable> {
public static class EMPTY<U extends Writable>
implements ResetableIterator<U> {
public boolean hasNext() { return false; }
public void reset() { }
public void close() throws IOException { }
public void clear() { }
public boolean next(U val) throws IOException {
return false;
}
public boolean replay(U val) throws IOException {
return false;
}
public void add(U item) throws IOException {
throw new UnsupportedOperationException();
}
}
/**
* True if a call to next may return a value. This is permitted false
* positives, but not false negatives.
*/
public boolean hasNext();
/**
* Assign next value to actual.
* It is required that elements added to a ResetableIterator be returned in
* the same order after a call to {@link #reset} (FIFO).
*
* Note that a call to this may fail for nested joins (i.e. more elements
* available, but none satisfying the constraints of the join)
*/
public boolean next(T val) throws IOException;
/**
* Assign last value returned to actual.
*/
public boolean replay(T val) throws IOException;
/**
* Set iterator to return to the start of its range. Must be called after
* calling {@link #add} to avoid a ConcurrentModificationException.
*/
public void reset();
/**
* Add an element to the collection of elements to iterate over.
*/
public void add(T item) throws IOException;
/**
* Close datasources and release resources. Calling methods on the iterator
* after calling close has undefined behavior.
*/
// XXX is this necessary?
public void close() throws IOException;
/**
* Close datasources, but do not release internal resources. Calling this
* method should permit the object to be reused with a different datasource.
*/
public void clear();
}
| 3,178 | 31.438776 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeInputSplit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.HashSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.io.serializer.*;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.util.ReflectionUtils;
/**
* This InputSplit contains a set of child InputSplits. Any InputSplit inserted
* into this collection must have a public default constructor.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class CompositeInputSplit extends InputSplit implements Writable {
private int fill = 0;
private long totsize = 0L;
private InputSplit[] splits;
private Configuration conf = new Configuration();
public CompositeInputSplit() { }
public CompositeInputSplit(int capacity) {
splits = new InputSplit[capacity];
}
/**
* Add an InputSplit to this collection.
* @throws IOException If capacity was not specified during construction
* or if capacity has been reached.
*/
public void add(InputSplit s) throws IOException, InterruptedException {
if (null == splits) {
throw new IOException("Uninitialized InputSplit");
}
if (fill == splits.length) {
throw new IOException("Too many splits");
}
splits[fill++] = s;
totsize += s.getLength();
}
/**
* Get ith child InputSplit.
*/
public InputSplit get(int i) {
return splits[i];
}
/**
* Return the aggregate length of all child InputSplits currently added.
*/
public long getLength() throws IOException {
return totsize;
}
/**
* Get the length of ith child InputSplit.
*/
public long getLength(int i) throws IOException, InterruptedException {
return splits[i].getLength();
}
/**
* Collect a set of hosts from all child InputSplits.
*/
public String[] getLocations() throws IOException, InterruptedException {
HashSet<String> hosts = new HashSet<String>();
for (InputSplit s : splits) {
String[] hints = s.getLocations();
if (hints != null && hints.length > 0) {
for (String host : hints) {
hosts.add(host);
}
}
}
return hosts.toArray(new String[hosts.size()]);
}
/**
* getLocations from ith InputSplit.
*/
public String[] getLocation(int i) throws IOException, InterruptedException {
return splits[i].getLocations();
}
/**
* Write splits in the following format.
* {@code
* <count><class1><class2>...<classn><split1><split2>...<splitn>
* }
*/
@SuppressWarnings("unchecked")
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, splits.length);
for (InputSplit s : splits) {
Text.writeString(out, s.getClass().getName());
}
for (InputSplit s : splits) {
SerializationFactory factory = new SerializationFactory(conf);
Serializer serializer =
factory.getSerializer(s.getClass());
serializer.open((DataOutputStream)out);
serializer.serialize(s);
}
}
/**
* {@inheritDoc}
* @throws IOException If the child InputSplit cannot be read, typically
* for failing access checks.
*/
@SuppressWarnings("unchecked") // Generic array assignment
public void readFields(DataInput in) throws IOException {
int card = WritableUtils.readVInt(in);
if (splits == null || splits.length != card) {
splits = new InputSplit[card];
}
Class<? extends InputSplit>[] cls = new Class[card];
try {
for (int i = 0; i < card; ++i) {
cls[i] =
Class.forName(Text.readString(in)).asSubclass(InputSplit.class);
}
for (int i = 0; i < card; ++i) {
splits[i] = ReflectionUtils.newInstance(cls[i], null);
SerializationFactory factory = new SerializationFactory(conf);
Deserializer deserializer = factory.getDeserializer(cls[i]);
deserializer.open((DataInputStream)in);
splits[i] = (InputSplit)deserializer.deserialize(splits[i]);
}
} catch (ClassNotFoundException e) {
throw new IOException("Failed split init", e);
}
}
}
| 5,286 | 30.658683 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionReducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.fieldsel;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
/**
* This class implements a reducer class that can be used to perform field
* selections in a manner similar to unix cut.
*
* The input data is treated as fields separated by a user specified
* separator (the default value is "\t"). The user can specify a list of
* fields that form the reduce output keys, and a list of fields that form
* the reduce output values. The fields are the union of those from the key
* and those from the value.
*
* The field separator is under attribute "mapreduce.fieldsel.data.field.separator"
*
* The reduce output field list spec is under attribute
* "mapreduce.fieldsel.reduce.output.key.value.fields.spec".
* The value is expected to be like
* "keyFieldsSpec:valueFieldsSpec" key/valueFieldsSpec are comma (,)
* separated field spec: fieldSpec,fieldSpec,fieldSpec ... Each field spec
* can be a simple number (e.g. 5) specifying a specific field, or a range
* (like 2-5) to specify a range of fields, or an open range (like 3-)
* specifying all the fields starting from field 3. The open range field
* spec applies value fields only. They have no effect on the key fields.
*
* Here is an example: "4,3,0,1:6,5,1-3,7-". It specifies to use fields
* 4,3,0 and 1 for keys, and use fields 6,5,1,2,3,7 and above for values.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FieldSelectionReducer<K, V>
extends Reducer<Text, Text, Text, Text> {
private String fieldSeparator = "\t";
private String reduceOutputKeyValueSpec;
private List<Integer> reduceOutputKeyFieldList = new ArrayList<Integer>();
private List<Integer> reduceOutputValueFieldList = new ArrayList<Integer>();
private int allReduceValueFieldsFrom = -1;
public static final Log LOG = LogFactory.getLog("FieldSelectionMapReduce");
public void setup(Context context)
throws IOException, InterruptedException {
Configuration conf = context.getConfiguration();
this.fieldSeparator =
conf.get(FieldSelectionHelper.DATA_FIELD_SEPERATOR, "\t");
this.reduceOutputKeyValueSpec =
conf.get(FieldSelectionHelper.REDUCE_OUTPUT_KEY_VALUE_SPEC, "0-:");
allReduceValueFieldsFrom = FieldSelectionHelper.parseOutputKeyValueSpec(
reduceOutputKeyValueSpec, reduceOutputKeyFieldList,
reduceOutputValueFieldList);
LOG.info(FieldSelectionHelper.specToString(fieldSeparator,
reduceOutputKeyValueSpec, allReduceValueFieldsFrom,
reduceOutputKeyFieldList, reduceOutputValueFieldList));
}
public void reduce(Text key, Iterable<Text> values, Context context)
throws IOException, InterruptedException {
String keyStr = key.toString() + this.fieldSeparator;
for (Text val : values) {
FieldSelectionHelper helper = new FieldSelectionHelper();
helper.extractOutputKeyValue(keyStr, val.toString(),
fieldSeparator, reduceOutputKeyFieldList,
reduceOutputValueFieldList, allReduceValueFieldsFrom, false, false);
context.write(helper.getKey(), helper.getValue());
}
}
}
| 4,318 | 39.364486 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.fieldsel;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
/**
* This class implements a mapper class that can be used to perform
* field selections in a manner similar to unix cut. The input data is treated
* as fields separated by a user specified separator (the default value is
* "\t"). The user can specify a list of fields that form the map output keys,
* and a list of fields that form the map output values. If the inputformat is
* TextInputFormat, the mapper will ignore the key to the map function. and the
* fields are from the value only. Otherwise, the fields are the union of those
* from the key and those from the value.
*
* The field separator is under attribute "mapreduce.fieldsel.data.field.separator"
*
* The map output field list spec is under attribute
* "mapreduce.fieldsel.map.output.key.value.fields.spec".
* The value is expected to be like
* "keyFieldsSpec:valueFieldsSpec" key/valueFieldsSpec are comma (,) separated
* field spec: fieldSpec,fieldSpec,fieldSpec ... Each field spec can be a
* simple number (e.g. 5) specifying a specific field, or a range (like 2-5)
* to specify a range of fields, or an open range (like 3-) specifying all
* the fields starting from field 3. The open range field spec applies value
* fields only. They have no effect on the key fields.
*
* Here is an example: "4,3,0,1:6,5,1-3,7-". It specifies to use fields
* 4,3,0 and 1 for keys, and use fields 6,5,1,2,3,7 and above for values.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FieldSelectionMapper<K, V>
extends Mapper<K, V, Text, Text> {
private String mapOutputKeyValueSpec;
private boolean ignoreInputKey;
private String fieldSeparator = "\t";
private List<Integer> mapOutputKeyFieldList = new ArrayList<Integer>();
private List<Integer> mapOutputValueFieldList = new ArrayList<Integer>();
private int allMapValueFieldsFrom = -1;
public static final Log LOG = LogFactory.getLog("FieldSelectionMapReduce");
public void setup(Context context)
throws IOException, InterruptedException {
Configuration conf = context.getConfiguration();
this.fieldSeparator =
conf.get(FieldSelectionHelper.DATA_FIELD_SEPERATOR, "\t");
this.mapOutputKeyValueSpec =
conf.get(FieldSelectionHelper.MAP_OUTPUT_KEY_VALUE_SPEC, "0-:");
try {
this.ignoreInputKey = TextInputFormat.class.getCanonicalName().equals(
context.getInputFormatClass().getCanonicalName());
} catch (ClassNotFoundException e) {
throw new IOException("Input format class not found", e);
}
allMapValueFieldsFrom = FieldSelectionHelper.parseOutputKeyValueSpec(
mapOutputKeyValueSpec, mapOutputKeyFieldList, mapOutputValueFieldList);
LOG.info(FieldSelectionHelper.specToString(fieldSeparator,
mapOutputKeyValueSpec, allMapValueFieldsFrom, mapOutputKeyFieldList,
mapOutputValueFieldList) + "\nignoreInputKey:" + ignoreInputKey);
}
/**
* The identify function. Input key/value pair is written directly to output.
*/
public void map(K key, V val, Context context)
throws IOException, InterruptedException {
FieldSelectionHelper helper = new FieldSelectionHelper(
FieldSelectionHelper.emptyText, FieldSelectionHelper.emptyText);
helper.extractOutputKeyValue(key.toString(), val.toString(),
fieldSeparator, mapOutputKeyFieldList, mapOutputValueFieldList,
allMapValueFieldsFrom, ignoreInputKey, true);
context.write(helper.getKey(), helper.getValue());
}
}
| 4,803 | 42.279279 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.fieldsel;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
/**
* This class implements a mapper/reducer class that can be used to perform
* field selections in a manner similar to unix cut. The input data is treated
* as fields separated by a user specified separator (the default value is
* "\t"). The user can specify a list of fields that form the map output keys,
* and a list of fields that form the map output values. If the inputformat is
* TextInputFormat, the mapper will ignore the key to the map function. and the
* fields are from the value only. Otherwise, the fields are the union of those
* from the key and those from the value.
*
* The field separator is under attribute "mapreduce.fieldsel.data.field.separator"
*
* The map output field list spec is under attribute
* "mapreduce.fieldsel.map.output.key.value.fields.spec".
* The value is expected to be like "keyFieldsSpec:valueFieldsSpec"
* key/valueFieldsSpec are comma (,) separated field spec: fieldSpec,fieldSpec,fieldSpec ...
* Each field spec can be a simple number (e.g. 5) specifying a specific field, or a range
* (like 2-5) to specify a range of fields, or an open range (like 3-) specifying all
* the fields starting from field 3. The open range field spec applies value fields only.
* They have no effect on the key fields.
*
* Here is an example: "4,3,0,1:6,5,1-3,7-". It specifies to use fields 4,3,0 and 1 for keys,
* and use fields 6,5,1,2,3,7 and above for values.
*
* The reduce output field list spec is under attribute
* "mapreduce.fieldsel.reduce.output.key.value.fields.spec".
*
* The reducer extracts output key/value pairs in a similar manner, except that
* the key is never ignored.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FieldSelectionHelper {
public static Text emptyText = new Text("");
public static final String DATA_FIELD_SEPERATOR =
"mapreduce.fieldsel.data.field.separator";
public static final String MAP_OUTPUT_KEY_VALUE_SPEC =
"mapreduce.fieldsel.map.output.key.value.fields.spec";
public static final String REDUCE_OUTPUT_KEY_VALUE_SPEC =
"mapreduce.fieldsel.reduce.output.key.value.fields.spec";
/**
* Extract the actual field numbers from the given field specs.
* If a field spec is in the form of "n-" (like 3-), then n will be the
* return value. Otherwise, -1 will be returned.
* @param fieldListSpec an array of field specs
* @param fieldList an array of field numbers extracted from the specs.
* @return number n if some field spec is in the form of "n-", -1 otherwise.
*/
private static int extractFields(String[] fieldListSpec,
List<Integer> fieldList) {
int allFieldsFrom = -1;
int i = 0;
int j = 0;
int pos = -1;
String fieldSpec = null;
for (i = 0; i < fieldListSpec.length; i++) {
fieldSpec = fieldListSpec[i];
if (fieldSpec.length() == 0) {
continue;
}
pos = fieldSpec.indexOf('-');
if (pos < 0) {
Integer fn = Integer.valueOf(fieldSpec);
fieldList.add(fn);
} else {
String start = fieldSpec.substring(0, pos);
String end = fieldSpec.substring(pos + 1);
if (start.length() == 0) {
start = "0";
}
if (end.length() == 0) {
allFieldsFrom = Integer.parseInt(start);
continue;
}
int startPos = Integer.parseInt(start);
int endPos = Integer.parseInt(end);
for (j = startPos; j <= endPos; j++) {
fieldList.add(j);
}
}
}
return allFieldsFrom;
}
private static String selectFields(String[] fields, List<Integer> fieldList,
int allFieldsFrom, String separator) {
String retv = null;
int i = 0;
StringBuffer sb = null;
if (fieldList != null && fieldList.size() > 0) {
if (sb == null) {
sb = new StringBuffer();
}
for (Integer index : fieldList) {
if (index < fields.length) {
sb.append(fields[index]);
}
sb.append(separator);
}
}
if (allFieldsFrom >= 0) {
if (sb == null) {
sb = new StringBuffer();
}
for (i = allFieldsFrom; i < fields.length; i++) {
sb.append(fields[i]).append(separator);
}
}
if (sb != null) {
retv = sb.toString();
if (retv.length() > 0) {
retv = retv.substring(0, retv.length() - 1);
}
}
return retv;
}
public static int parseOutputKeyValueSpec(String keyValueSpec,
List<Integer> keyFieldList, List<Integer> valueFieldList) {
String[] keyValSpecs = keyValueSpec.split(":", -1);
String[] keySpec = keyValSpecs[0].split(",");
String[] valSpec = new String[0];
if (keyValSpecs.length > 1) {
valSpec = keyValSpecs[1].split(",");
}
FieldSelectionHelper.extractFields(keySpec, keyFieldList);
return FieldSelectionHelper.extractFields(valSpec, valueFieldList);
}
public static String specToString(String fieldSeparator, String keyValueSpec,
int allValueFieldsFrom, List<Integer> keyFieldList,
List<Integer> valueFieldList) {
StringBuffer sb = new StringBuffer();
sb.append("fieldSeparator: ").append(fieldSeparator).append("\n");
sb.append("keyValueSpec: ").append(keyValueSpec).append("\n");
sb.append("allValueFieldsFrom: ").append(allValueFieldsFrom);
sb.append("\n");
sb.append("keyFieldList.length: ").append(keyFieldList.size());
sb.append("\n");
for (Integer field : keyFieldList) {
sb.append("\t").append(field).append("\n");
}
sb.append("valueFieldList.length: ").append(valueFieldList.size());
sb.append("\n");
for (Integer field : valueFieldList) {
sb.append("\t").append(field).append("\n");
}
return sb.toString();
}
private Text key = null;
private Text value = null;
public FieldSelectionHelper() {
}
public FieldSelectionHelper(Text key, Text val) {
this.key = key;
this.value = val;
}
public Text getKey() {
return key;
}
public Text getValue() {
return value;
}
public void extractOutputKeyValue(String key, String val,
String fieldSep, List<Integer> keyFieldList, List<Integer> valFieldList,
int allValueFieldsFrom, boolean ignoreKey, boolean isMap) {
if (!ignoreKey) {
val = key + val;
}
String[] fields = val.split(fieldSep);
String newKey = selectFields(fields, keyFieldList, -1, fieldSep);
String newVal = selectFields(fields, valFieldList, allValueFieldsFrom,
fieldSep);
if (isMap && newKey == null) {
newKey = newVal;
newVal = null;
}
if (newKey != null) {
this.key = new Text(newKey);
}
if (newVal != null) {
this.value = new Text(newVal);
}
}
}
| 7,804 | 33.232456 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.map;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MapContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.StatusReporter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
* Multithreaded implementation for @link org.apache.hadoop.mapreduce.Mapper.
* <p>
* It can be used instead of the default implementation,
* {@link org.apache.hadoop.mapred.MapRunner}, when the Map operation is not CPU
* bound in order to improve throughput.
* <p>
* Mapper implementations using this MapRunnable must be thread-safe.
* <p>
* The Map-Reduce job has to be configured with the mapper to use via
* {@link #setMapperClass(Job, Class)} and
* the number of thread the thread-pool can use with the
* {@link #getNumberOfThreads(JobContext)} method. The default
* value is 10 threads.
* <p>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MultithreadedMapper<K1, V1, K2, V2>
extends Mapper<K1, V1, K2, V2> {
private static final Log LOG = LogFactory.getLog(MultithreadedMapper.class);
public static String NUM_THREADS = "mapreduce.mapper.multithreadedmapper.threads";
public static String MAP_CLASS = "mapreduce.mapper.multithreadedmapper.mapclass";
private Class<? extends Mapper<K1,V1,K2,V2>> mapClass;
private Context outer;
private List<MapRunner> runners;
/**
* The number of threads in the thread pool that will run the map function.
* @param job the job
* @return the number of threads
*/
public static int getNumberOfThreads(JobContext job) {
return job.getConfiguration().getInt(NUM_THREADS, 10);
}
/**
* Set the number of threads in the pool for running maps.
* @param job the job to modify
* @param threads the new number of threads
*/
public static void setNumberOfThreads(Job job, int threads) {
job.getConfiguration().setInt(NUM_THREADS, threads);
}
/**
* Get the application's mapper class.
* @param <K1> the map's input key type
* @param <V1> the map's input value type
* @param <K2> the map's output key type
* @param <V2> the map's output value type
* @param job the job
* @return the mapper class to run
*/
@SuppressWarnings("unchecked")
public static <K1,V1,K2,V2>
Class<Mapper<K1,V1,K2,V2>> getMapperClass(JobContext job) {
return (Class<Mapper<K1,V1,K2,V2>>)
job.getConfiguration().getClass(MAP_CLASS, Mapper.class);
}
/**
* Set the application's mapper class.
* @param <K1> the map input key type
* @param <V1> the map input value type
* @param <K2> the map output key type
* @param <V2> the map output value type
* @param job the job to modify
* @param cls the class to use as the mapper
*/
public static <K1,V1,K2,V2>
void setMapperClass(Job job,
Class<? extends Mapper<K1,V1,K2,V2>> cls) {
if (MultithreadedMapper.class.isAssignableFrom(cls)) {
throw new IllegalArgumentException("Can't have recursive " +
"MultithreadedMapper instances.");
}
job.getConfiguration().setClass(MAP_CLASS, cls, Mapper.class);
}
/**
* Run the application's maps using a thread pool.
*/
@Override
public void run(Context context) throws IOException, InterruptedException {
outer = context;
int numberOfThreads = getNumberOfThreads(context);
mapClass = getMapperClass(context);
if (LOG.isDebugEnabled()) {
LOG.debug("Configuring multithread runner to use " + numberOfThreads +
" threads");
}
runners = new ArrayList<MapRunner>(numberOfThreads);
for(int i=0; i < numberOfThreads; ++i) {
MapRunner thread = new MapRunner(context);
thread.start();
runners.add(i, thread);
}
for(int i=0; i < numberOfThreads; ++i) {
MapRunner thread = runners.get(i);
thread.join();
Throwable th = thread.throwable;
if (th != null) {
if (th instanceof IOException) {
throw (IOException) th;
} else if (th instanceof InterruptedException) {
throw (InterruptedException) th;
} else {
throw new RuntimeException(th);
}
}
}
}
private class SubMapRecordReader extends RecordReader<K1,V1> {
private K1 key;
private V1 value;
private Configuration conf;
@Override
public void close() throws IOException {
}
@Override
public float getProgress() throws IOException, InterruptedException {
return 0;
}
@Override
public void initialize(InputSplit split,
TaskAttemptContext context
) throws IOException, InterruptedException {
conf = context.getConfiguration();
}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
synchronized (outer) {
if (!outer.nextKeyValue()) {
return false;
}
key = ReflectionUtils.copy(outer.getConfiguration(),
outer.getCurrentKey(), key);
value = ReflectionUtils.copy(conf, outer.getCurrentValue(), value);
return true;
}
}
public K1 getCurrentKey() {
return key;
}
@Override
public V1 getCurrentValue() {
return value;
}
}
private class SubMapRecordWriter extends RecordWriter<K2,V2> {
@Override
public void close(TaskAttemptContext context) throws IOException,
InterruptedException {
}
@Override
public void write(K2 key, V2 value) throws IOException,
InterruptedException {
synchronized (outer) {
outer.write(key, value);
}
}
}
private class SubMapStatusReporter extends StatusReporter {
@Override
public Counter getCounter(Enum<?> name) {
return outer.getCounter(name);
}
@Override
public Counter getCounter(String group, String name) {
return outer.getCounter(group, name);
}
@Override
public void progress() {
outer.progress();
}
@Override
public void setStatus(String status) {
outer.setStatus(status);
}
@Override
public float getProgress() {
return outer.getProgress();
}
}
private class MapRunner extends Thread {
private Mapper<K1,V1,K2,V2> mapper;
private Context subcontext;
private Throwable throwable;
private RecordReader<K1,V1> reader = new SubMapRecordReader();
MapRunner(Context context) throws IOException, InterruptedException {
mapper = ReflectionUtils.newInstance(mapClass,
context.getConfiguration());
MapContext<K1, V1, K2, V2> mapContext =
new MapContextImpl<K1, V1, K2, V2>(outer.getConfiguration(),
outer.getTaskAttemptID(),
reader,
new SubMapRecordWriter(),
context.getOutputCommitter(),
new SubMapStatusReporter(),
outer.getInputSplit());
subcontext = new WrappedMapper<K1, V1, K2, V2>().getMapContext(mapContext);
reader.initialize(context.getInputSplit(), context);
}
@Override
public void run() {
try {
mapper.run(subcontext);
reader.close();
} catch (Throwable ie) {
throwable = ie;
}
}
}
}
| 9,130 | 31.379433 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/TokenCounterMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.map;
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
/**
* Tokenize the input values and emit each word with a count of 1.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TokenCounterMapper extends Mapper<Object, Text, Text, IntWritable>{
private final static IntWritable one = new IntWritable(1);
private Text word = new Text();
@Override
public void map(Object key, Text value, Context context
) throws IOException, InterruptedException {
StringTokenizer itr = new StringTokenizer(value.toString());
while (itr.hasMoreTokens()) {
word.set(itr.nextToken());
context.write(word, one);
}
}
}
| 1,776 | 34.54 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/RegexMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.map;
import java.io.IOException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
/** A {@link Mapper} that extracts text matching a regular expression. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class RegexMapper<K> extends Mapper<K, Text, Text, LongWritable> {
public static String PATTERN = "mapreduce.mapper.regex";
public static String GROUP = "mapreduce.mapper.regexmapper..group";
private Pattern pattern;
private int group;
public void setup(Context context) {
Configuration conf = context.getConfiguration();
pattern = Pattern.compile(conf.get(PATTERN));
group = conf.getInt(GROUP, 0);
}
public void map(K key, Text value,
Context context)
throws IOException, InterruptedException {
String text = value.toString();
Matcher matcher = pattern.matcher(text);
while (matcher.find()) {
context.write(new Text(matcher.group(group)), new LongWritable(1));
}
}
}
| 2,109 | 34.762712 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/InverseMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.map;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.Mapper;
/** A {@link Mapper} that swaps keys and values. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class InverseMapper<K, V> extends Mapper<K,V,V,K> {
/** The inverse function. Input keys and values are swapped.*/
@Override
public void map(K key, V value, Context context
) throws IOException, InterruptedException {
context.write(value, key);
}
}
| 1,442 | 35.075 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/WrappedMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.map;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MapContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.security.Credentials;
/**
* A {@link Mapper} which wraps a given one to allow custom
* {@link Mapper.Context} implementations.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class WrappedMapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
extends Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
/**
* Get a wrapped {@link Mapper.Context} for custom implementations.
* @param mapContext <code>MapContext</code> to be wrapped
* @return a wrapped <code>Mapper.Context</code> for custom implementations
*/
public Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context
getMapContext(MapContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> mapContext) {
return new Context(mapContext);
}
@InterfaceStability.Evolving
public class Context
extends Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context {
protected MapContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> mapContext;
public Context(MapContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> mapContext) {
this.mapContext = mapContext;
}
/**
* Get the input split for this map.
*/
public InputSplit getInputSplit() {
return mapContext.getInputSplit();
}
@Override
public KEYIN getCurrentKey() throws IOException, InterruptedException {
return mapContext.getCurrentKey();
}
@Override
public VALUEIN getCurrentValue() throws IOException, InterruptedException {
return mapContext.getCurrentValue();
}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
return mapContext.nextKeyValue();
}
@Override
public Counter getCounter(Enum<?> counterName) {
return mapContext.getCounter(counterName);
}
@Override
public Counter getCounter(String groupName, String counterName) {
return mapContext.getCounter(groupName, counterName);
}
@Override
public OutputCommitter getOutputCommitter() {
return mapContext.getOutputCommitter();
}
@Override
public void write(KEYOUT key, VALUEOUT value) throws IOException,
InterruptedException {
mapContext.write(key, value);
}
@Override
public String getStatus() {
return mapContext.getStatus();
}
@Override
public TaskAttemptID getTaskAttemptID() {
return mapContext.getTaskAttemptID();
}
@Override
public void setStatus(String msg) {
mapContext.setStatus(msg);
}
@Override
public Path[] getArchiveClassPaths() {
return mapContext.getArchiveClassPaths();
}
@Override
public String[] getArchiveTimestamps() {
return mapContext.getArchiveTimestamps();
}
@Override
public URI[] getCacheArchives() throws IOException {
return mapContext.getCacheArchives();
}
@Override
public URI[] getCacheFiles() throws IOException {
return mapContext.getCacheFiles();
}
@Override
public Class<? extends Reducer<?, ?, ?, ?>> getCombinerClass()
throws ClassNotFoundException {
return mapContext.getCombinerClass();
}
@Override
public Configuration getConfiguration() {
return mapContext.getConfiguration();
}
@Override
public Path[] getFileClassPaths() {
return mapContext.getFileClassPaths();
}
@Override
public String[] getFileTimestamps() {
return mapContext.getFileTimestamps();
}
@Override
public RawComparator<?> getCombinerKeyGroupingComparator() {
return mapContext.getCombinerKeyGroupingComparator();
}
@Override
public RawComparator<?> getGroupingComparator() {
return mapContext.getGroupingComparator();
}
@Override
public Class<? extends InputFormat<?, ?>> getInputFormatClass()
throws ClassNotFoundException {
return mapContext.getInputFormatClass();
}
@Override
public String getJar() {
return mapContext.getJar();
}
@Override
public JobID getJobID() {
return mapContext.getJobID();
}
@Override
public String getJobName() {
return mapContext.getJobName();
}
@Override
public boolean getJobSetupCleanupNeeded() {
return mapContext.getJobSetupCleanupNeeded();
}
@Override
public boolean getTaskCleanupNeeded() {
return mapContext.getTaskCleanupNeeded();
}
@Override
public Path[] getLocalCacheArchives() throws IOException {
return mapContext.getLocalCacheArchives();
}
@Override
public Path[] getLocalCacheFiles() throws IOException {
return mapContext.getLocalCacheFiles();
}
@Override
public Class<?> getMapOutputKeyClass() {
return mapContext.getMapOutputKeyClass();
}
@Override
public Class<?> getMapOutputValueClass() {
return mapContext.getMapOutputValueClass();
}
@Override
public Class<? extends Mapper<?, ?, ?, ?>> getMapperClass()
throws ClassNotFoundException {
return mapContext.getMapperClass();
}
@Override
public int getMaxMapAttempts() {
return mapContext.getMaxMapAttempts();
}
@Override
public int getMaxReduceAttempts() {
return mapContext.getMaxReduceAttempts();
}
@Override
public int getNumReduceTasks() {
return mapContext.getNumReduceTasks();
}
@Override
public Class<? extends OutputFormat<?, ?>> getOutputFormatClass()
throws ClassNotFoundException {
return mapContext.getOutputFormatClass();
}
@Override
public Class<?> getOutputKeyClass() {
return mapContext.getOutputKeyClass();
}
@Override
public Class<?> getOutputValueClass() {
return mapContext.getOutputValueClass();
}
@Override
public Class<? extends Partitioner<?, ?>> getPartitionerClass()
throws ClassNotFoundException {
return mapContext.getPartitionerClass();
}
@Override
public Class<? extends Reducer<?, ?, ?, ?>> getReducerClass()
throws ClassNotFoundException {
return mapContext.getReducerClass();
}
@Override
public RawComparator<?> getSortComparator() {
return mapContext.getSortComparator();
}
@Override
public boolean getSymlink() {
return mapContext.getSymlink();
}
@Override
public Path getWorkingDirectory() throws IOException {
return mapContext.getWorkingDirectory();
}
@Override
public void progress() {
mapContext.progress();
}
@Override
public boolean getProfileEnabled() {
return mapContext.getProfileEnabled();
}
@Override
public String getProfileParams() {
return mapContext.getProfileParams();
}
@Override
public IntegerRanges getProfileTaskRange(boolean isMap) {
return mapContext.getProfileTaskRange(isMap);
}
@Override
public String getUser() {
return mapContext.getUser();
}
@Override
public Credentials getCredentials() {
return mapContext.getCredentials();
}
@Override
public float getProgress() {
return mapContext.getProgress();
}
}
}
| 8,850 | 25.659639 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileRecordReaderWrapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
* A wrapper class for a record reader that handles a single file split. It
* delegates most of the methods to the wrapped instance. A concrete subclass
* needs to provide a constructor that calls this parent constructor with the
* appropriate input format. The subclass constructor must satisfy the specific
* constructor signature that is required by
* <code>CombineFileRecordReader</code>.
*
* Subclassing is needed to get a concrete record reader wrapper because of the
* constructor requirement.
*
* @see CombineFileRecordReader
* @see CombineFileInputFormat
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class CombineFileRecordReaderWrapper<K,V>
extends RecordReader<K,V> {
private final FileSplit fileSplit;
private final RecordReader<K,V> delegate;
protected CombineFileRecordReaderWrapper(FileInputFormat<K,V> inputFormat,
CombineFileSplit split, TaskAttemptContext context, Integer idx)
throws IOException, InterruptedException {
fileSplit = new FileSplit(split.getPath(idx),
split.getOffset(idx),
split.getLength(idx),
split.getLocations());
delegate = inputFormat.createRecordReader(fileSplit, context);
}
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
// it really should be the same file split at the time the wrapper instance
// was created
assert fileSplitIsValid(context);
delegate.initialize(fileSplit, context);
}
private boolean fileSplitIsValid(TaskAttemptContext context) {
Configuration conf = context.getConfiguration();
long offset = conf.getLong(MRJobConfig.MAP_INPUT_START, 0L);
if (fileSplit.getStart() != offset) {
return false;
}
long length = conf.getLong(MRJobConfig.MAP_INPUT_PATH, 0L);
if (fileSplit.getLength() != length) {
return false;
}
String path = conf.get(MRJobConfig.MAP_INPUT_FILE);
if (!fileSplit.getPath().toString().equals(path)) {
return false;
}
return true;
}
public boolean nextKeyValue() throws IOException, InterruptedException {
return delegate.nextKeyValue();
}
public K getCurrentKey() throws IOException, InterruptedException {
return delegate.getCurrentKey();
}
public V getCurrentValue() throws IOException, InterruptedException {
return delegate.getCurrentValue();
}
public float getProgress() throws IOException, InterruptedException {
return delegate.getProgress();
}
public void close() throws IOException {
delegate.close();
}
}
| 3,833 | 34.174312 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/DelegatingRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.util.ReflectionUtils;
/**
* This is a delegating RecordReader, which delegates the functionality to the
* underlying record reader in {@link TaggedInputSplit}
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class DelegatingRecordReader<K, V> extends RecordReader<K, V> {
RecordReader<K, V> originalRR;
/**
* Constructs the DelegatingRecordReader.
*
* @param split TaggegInputSplit object
* @param context TaskAttemptContext object
*
* @throws IOException
* @throws InterruptedException
*/
@SuppressWarnings("unchecked")
public DelegatingRecordReader(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
// Find the InputFormat and then the RecordReader from the
// TaggedInputSplit.
TaggedInputSplit taggedInputSplit = (TaggedInputSplit) split;
InputFormat<K, V> inputFormat = (InputFormat<K, V>) ReflectionUtils
.newInstance(taggedInputSplit.getInputFormatClass(), context
.getConfiguration());
originalRR = inputFormat.createRecordReader(taggedInputSplit
.getInputSplit(), context);
}
@Override
public void close() throws IOException {
originalRR.close();
}
@Override
public K getCurrentKey() throws IOException, InterruptedException {
return originalRR.getCurrentKey();
}
@Override
public V getCurrentValue() throws IOException, InterruptedException {
return originalRR.getCurrentValue();
}
@Override
public float getProgress() throws IOException, InterruptedException {
return originalRR.getProgress();
}
@Override
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
originalRR.initialize(((TaggedInputSplit) split).getInputSplit(), context);
}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
return originalRR.nextKeyValue();
}
}
| 3,182 | 33.225806 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/TextInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.SplittableCompressionCodec;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import com.google.common.base.Charsets;
/** An {@link InputFormat} for plain text files. Files are broken into lines.
* Either linefeed or carriage-return are used to signal end of line. Keys are
* the position in the file, and values are the line of text.. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TextInputFormat extends FileInputFormat<LongWritable, Text> {
@Override
public RecordReader<LongWritable, Text>
createRecordReader(InputSplit split,
TaskAttemptContext context) {
String delimiter = context.getConfiguration().get(
"textinputformat.record.delimiter");
byte[] recordDelimiterBytes = null;
if (null != delimiter)
recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
return new LineRecordReader(recordDelimiterBytes);
}
@Override
protected boolean isSplitable(JobContext context, Path file) {
final CompressionCodec codec =
new CompressionCodecFactory(context.getConfiguration()).getCodec(file);
if (null == codec) {
return true;
}
return codec instanceof SplittableCompressionCodec;
}
}
| 2,653 | 38.61194 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/KeyValueLineRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
* This class treats a line in the input as a key/value pair separated by a
* separator character. The separator can be specified in config file
* under the attribute name mapreduce.input.keyvaluelinerecordreader.key.value.separator. The default
* separator is the tab character ('\t').
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class KeyValueLineRecordReader extends RecordReader<Text, Text> {
public static final String KEY_VALUE_SEPERATOR =
"mapreduce.input.keyvaluelinerecordreader.key.value.separator";
private final LineRecordReader lineRecordReader;
private byte separator = (byte) '\t';
private Text innerValue;
private Text key;
private Text value;
public Class getKeyClass() { return Text.class; }
public KeyValueLineRecordReader(Configuration conf)
throws IOException {
lineRecordReader = new LineRecordReader();
String sepStr = conf.get(KEY_VALUE_SEPERATOR, "\t");
this.separator = (byte) sepStr.charAt(0);
}
public void initialize(InputSplit genericSplit,
TaskAttemptContext context) throws IOException {
lineRecordReader.initialize(genericSplit, context);
}
public static int findSeparator(byte[] utf, int start, int length,
byte sep) {
for (int i = start; i < (start + length); i++) {
if (utf[i] == sep) {
return i;
}
}
return -1;
}
public static void setKeyValue(Text key, Text value, byte[] line,
int lineLen, int pos) {
if (pos == -1) {
key.set(line, 0, lineLen);
value.set("");
} else {
key.set(line, 0, pos);
value.set(line, pos + 1, lineLen - pos - 1);
}
}
/** Read key/value pair in a line. */
public synchronized boolean nextKeyValue()
throws IOException {
byte[] line = null;
int lineLen = -1;
if (lineRecordReader.nextKeyValue()) {
innerValue = lineRecordReader.getCurrentValue();
line = innerValue.getBytes();
lineLen = innerValue.getLength();
} else {
return false;
}
if (line == null)
return false;
if (key == null) {
key = new Text();
}
if (value == null) {
value = new Text();
}
int pos = findSeparator(line, 0, lineLen, this.separator);
setKeyValue(key, value, line, lineLen, pos);
return true;
}
public Text getCurrentKey() {
return key;
}
public Text getCurrentValue() {
return value;
}
public float getProgress() throws IOException {
return lineRecordReader.getProgress();
}
public synchronized void close() throws IOException {
lineRecordReader.close();
}
}
| 3,871 | 29.015504 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FixedLengthRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
/**
* A reader to read fixed length records from a split. Record offset is
* returned as key and the record as bytes is returned in value.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class FixedLengthRecordReader
extends RecordReader<LongWritable, BytesWritable> {
private static final Log LOG
= LogFactory.getLog(FixedLengthRecordReader.class);
private int recordLength;
private long start;
private long pos;
private long end;
private long numRecordsRemainingInSplit;
private FSDataInputStream fileIn;
private Seekable filePosition;
private LongWritable key;
private BytesWritable value;
private boolean isCompressedInput;
private Decompressor decompressor;
private InputStream inputStream;
public FixedLengthRecordReader(int recordLength) {
this.recordLength = recordLength;
}
@Override
public void initialize(InputSplit genericSplit,
TaskAttemptContext context) throws IOException {
FileSplit split = (FileSplit) genericSplit;
Configuration job = context.getConfiguration();
final Path file = split.getPath();
initialize(job, split.getStart(), split.getLength(), file);
}
// This is also called from the old FixedLengthRecordReader API implementation
public void initialize(Configuration job, long splitStart, long splitLength,
Path file) throws IOException {
start = splitStart;
end = start + splitLength;
long partialRecordLength = start % recordLength;
long numBytesToSkip = 0;
if (partialRecordLength != 0) {
numBytesToSkip = recordLength - partialRecordLength;
}
// open the file and seek to the start of the split
final FileSystem fs = file.getFileSystem(job);
fileIn = fs.open(file);
CompressionCodec codec = new CompressionCodecFactory(job).getCodec(file);
if (null != codec) {
isCompressedInput = true;
decompressor = CodecPool.getDecompressor(codec);
CompressionInputStream cIn
= codec.createInputStream(fileIn, decompressor);
filePosition = cIn;
inputStream = cIn;
numRecordsRemainingInSplit = Long.MAX_VALUE;
LOG.info(
"Compressed input; cannot compute number of records in the split");
} else {
fileIn.seek(start);
filePosition = fileIn;
inputStream = fileIn;
long splitSize = end - start - numBytesToSkip;
numRecordsRemainingInSplit = (splitSize + recordLength - 1)/recordLength;
if (numRecordsRemainingInSplit < 0) {
numRecordsRemainingInSplit = 0;
}
LOG.info("Expecting " + numRecordsRemainingInSplit
+ " records each with a length of " + recordLength
+ " bytes in the split with an effective size of "
+ splitSize + " bytes");
}
if (numBytesToSkip != 0) {
start += inputStream.skip(numBytesToSkip);
}
this.pos = start;
}
@Override
public synchronized boolean nextKeyValue() throws IOException {
if (key == null) {
key = new LongWritable();
}
if (value == null) {
value = new BytesWritable(new byte[recordLength]);
}
boolean dataRead = false;
value.setSize(recordLength);
byte[] record = value.getBytes();
if (numRecordsRemainingInSplit > 0) {
key.set(pos);
int offset = 0;
int numBytesToRead = recordLength;
int numBytesRead = 0;
while (numBytesToRead > 0) {
numBytesRead = inputStream.read(record, offset, numBytesToRead);
if (numBytesRead == -1) {
// EOF
break;
}
offset += numBytesRead;
numBytesToRead -= numBytesRead;
}
numBytesRead = recordLength - numBytesToRead;
pos += numBytesRead;
if (numBytesRead > 0) {
dataRead = true;
if (numBytesRead >= recordLength) {
if (!isCompressedInput) {
numRecordsRemainingInSplit--;
}
} else {
throw new IOException("Partial record(length = " + numBytesRead
+ ") found at the end of split.");
}
} else {
numRecordsRemainingInSplit = 0L; // End of input.
}
}
return dataRead;
}
@Override
public LongWritable getCurrentKey() {
return key;
}
@Override
public BytesWritable getCurrentValue() {
return value;
}
@Override
public synchronized float getProgress() throws IOException {
if (start == end) {
return 0.0f;
} else {
return Math.min(1.0f, (getFilePosition() - start) / (float)(end - start));
}
}
@Override
public synchronized void close() throws IOException {
try {
if (inputStream != null) {
inputStream.close();
inputStream = null;
}
} finally {
if (decompressor != null) {
CodecPool.returnDecompressor(decompressor);
decompressor = null;
}
}
}
// This is called from the old FixedLengthRecordReader API implementation.
public long getPos() {
return pos;
}
private long getFilePosition() throws IOException {
long retVal;
if (isCompressedInput && null != filePosition) {
retVal = filePosition.getPos();
} else {
retVal = pos;
}
return retVal;
}
}
| 7,088 | 31.076923 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileInputFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.security.DigestException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.util.ReflectionUtils;
/**
* A class that allows a map/red job to work on a sample of sequence files.
* The sample is decided by the filter class set by the job.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceFileInputFilter<K, V>
extends SequenceFileInputFormat<K, V> {
public static final Log LOG = LogFactory.getLog(FileInputFormat.class);
final public static String FILTER_CLASS =
"mapreduce.input.sequencefileinputfilter.class";
final public static String FILTER_FREQUENCY =
"mapreduce.input.sequencefileinputfilter.frequency";
final public static String FILTER_REGEX =
"mapreduce.input.sequencefileinputfilter.regex";
public SequenceFileInputFilter() {
}
/** Create a record reader for the given split
* @param split file split
* @param context the task-attempt context
* @return RecordReader
*/
public RecordReader<K, V> createRecordReader(InputSplit split,
TaskAttemptContext context) throws IOException {
context.setStatus(split.toString());
return new FilterRecordReader<K, V>(context.getConfiguration());
}
/** set the filter class
*
* @param job The job
* @param filterClass filter class
*/
public static void setFilterClass(Job job, Class<?> filterClass) {
job.getConfiguration().set(FILTER_CLASS, filterClass.getName());
}
/**
* filter interface
*/
public interface Filter extends Configurable {
/** filter function
* Decide if a record should be filtered or not
* @param key record key
* @return true if a record is accepted; return false otherwise
*/
public abstract boolean accept(Object key);
}
/**
* base class for Filters
*/
public static abstract class FilterBase implements Filter {
Configuration conf;
public Configuration getConf() {
return conf;
}
}
/** Records filter by matching key to regex
*/
public static class RegexFilter extends FilterBase {
private Pattern p;
/** Define the filtering regex and stores it in conf
* @param conf where the regex is set
* @param regex regex used as a filter
*/
public static void setPattern(Configuration conf, String regex)
throws PatternSyntaxException {
try {
Pattern.compile(regex);
} catch (PatternSyntaxException e) {
throw new IllegalArgumentException("Invalid pattern: "+regex);
}
conf.set(FILTER_REGEX, regex);
}
public RegexFilter() { }
/** configure the Filter by checking the configuration
*/
public void setConf(Configuration conf) {
String regex = conf.get(FILTER_REGEX);
if (regex == null)
throw new RuntimeException(FILTER_REGEX + "not set");
this.p = Pattern.compile(regex);
this.conf = conf;
}
/** Filtering method
* If key matches the regex, return true; otherwise return false
* @see Filter#accept(Object)
*/
public boolean accept(Object key) {
return p.matcher(key.toString()).matches();
}
}
/** This class returns a percentage of records
* The percentage is determined by a filtering frequency <i>f</i> using
* the criteria record# % f == 0.
* For example, if the frequency is 10, one out of 10 records is returned.
*/
public static class PercentFilter extends FilterBase {
private int frequency;
private int count;
/** set the frequency and stores it in conf
* @param conf configuration
* @param frequency filtering frequencey
*/
public static void setFrequency(Configuration conf, int frequency) {
if (frequency <= 0)
throw new IllegalArgumentException(
"Negative " + FILTER_FREQUENCY + ": " + frequency);
conf.setInt(FILTER_FREQUENCY, frequency);
}
public PercentFilter() { }
/** configure the filter by checking the configuration
*
* @param conf configuration
*/
public void setConf(Configuration conf) {
this.frequency = conf.getInt(FILTER_FREQUENCY, 10);
if (this.frequency <= 0) {
throw new RuntimeException(
"Negative "+FILTER_FREQUENCY + ": " + this.frequency);
}
this.conf = conf;
}
/** Filtering method
* If record# % frequency==0, return true; otherwise return false
* @see Filter#accept(Object)
*/
public boolean accept(Object key) {
boolean accepted = false;
if (count == 0)
accepted = true;
if (++count == frequency) {
count = 0;
}
return accepted;
}
}
/** This class returns a set of records by examing the MD5 digest of its
* key against a filtering frequency <i>f</i>. The filtering criteria is
* MD5(key) % f == 0.
*/
public static class MD5Filter extends FilterBase {
private int frequency;
private static final MessageDigest DIGESTER;
public static final int MD5_LEN = 16;
private byte [] digest = new byte[MD5_LEN];
static {
try {
DIGESTER = MessageDigest.getInstance("MD5");
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
}
/** set the filtering frequency in configuration
*
* @param conf configuration
* @param frequency filtering frequency
*/
public static void setFrequency(Configuration conf, int frequency) {
if (frequency <= 0)
throw new IllegalArgumentException(
"Negative " + FILTER_FREQUENCY + ": " + frequency);
conf.setInt(FILTER_FREQUENCY, frequency);
}
public MD5Filter() { }
/** configure the filter according to configuration
*
* @param conf configuration
*/
public void setConf(Configuration conf) {
this.frequency = conf.getInt(FILTER_FREQUENCY, 10);
if (this.frequency <= 0) {
throw new RuntimeException(
"Negative " + FILTER_FREQUENCY + ": " + this.frequency);
}
this.conf = conf;
}
/** Filtering method
* If MD5(key) % frequency==0, return true; otherwise return false
* @see Filter#accept(Object)
*/
public boolean accept(Object key) {
try {
long hashcode;
if (key instanceof Text) {
hashcode = MD5Hashcode((Text)key);
} else if (key instanceof BytesWritable) {
hashcode = MD5Hashcode((BytesWritable)key);
} else {
ByteBuffer bb;
bb = Text.encode(key.toString());
hashcode = MD5Hashcode(bb.array(), 0, bb.limit());
}
if (hashcode / frequency * frequency == hashcode)
return true;
} catch(Exception e) {
LOG.warn(e);
throw new RuntimeException(e);
}
return false;
}
private long MD5Hashcode(Text key) throws DigestException {
return MD5Hashcode(key.getBytes(), 0, key.getLength());
}
private long MD5Hashcode(BytesWritable key) throws DigestException {
return MD5Hashcode(key.getBytes(), 0, key.getLength());
}
synchronized private long MD5Hashcode(byte[] bytes,
int start, int length) throws DigestException {
DIGESTER.update(bytes, 0, length);
DIGESTER.digest(digest, 0, MD5_LEN);
long hashcode=0;
for (int i = 0; i < 8; i++)
hashcode |= ((digest[i] & 0xffL) << (8 * (7 - i)));
return hashcode;
}
}
private static class FilterRecordReader<K, V>
extends SequenceFileRecordReader<K, V> {
private Filter filter;
private K key;
private V value;
public FilterRecordReader(Configuration conf)
throws IOException {
super();
// instantiate filter
filter = (Filter)ReflectionUtils.newInstance(
conf.getClass(FILTER_CLASS, PercentFilter.class), conf);
}
public synchronized boolean nextKeyValue()
throws IOException, InterruptedException {
while (super.nextKeyValue()) {
key = super.getCurrentKey();
if (filter.accept(key)) {
value = super.getCurrentValue();
return true;
}
}
return false;
}
@Override
public K getCurrentKey() {
return key;
}
@Override
public V getCurrentValue() {
return value;
}
}
}
| 10,062 | 29.868098 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.mapred.LocatedFileStatusFetcher;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StopWatch;
import org.apache.hadoop.util.StringUtils;
import com.google.common.collect.Lists;
/**
* A base class for file-based {@link InputFormat}s.
*
* <p><code>FileInputFormat</code> is the base class for all file-based
* <code>InputFormat</code>s. This provides a generic implementation of
* {@link #getSplits(JobContext)}.
*
* Implementations of <code>FileInputFormat</code> can also override the
* {@link #isSplitable(JobContext, Path)} method to prevent input files
* from being split-up in certain situations. Implementations that may
* deal with non-splittable files <i>must</i> override this method, since
* the default implementation assumes splitting is always possible.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class FileInputFormat<K, V> extends InputFormat<K, V> {
public static final String INPUT_DIR =
"mapreduce.input.fileinputformat.inputdir";
public static final String SPLIT_MAXSIZE =
"mapreduce.input.fileinputformat.split.maxsize";
public static final String SPLIT_MINSIZE =
"mapreduce.input.fileinputformat.split.minsize";
public static final String PATHFILTER_CLASS =
"mapreduce.input.pathFilter.class";
public static final String NUM_INPUT_FILES =
"mapreduce.input.fileinputformat.numinputfiles";
public static final String INPUT_DIR_RECURSIVE =
"mapreduce.input.fileinputformat.input.dir.recursive";
public static final String LIST_STATUS_NUM_THREADS =
"mapreduce.input.fileinputformat.list-status.num-threads";
public static final int DEFAULT_LIST_STATUS_NUM_THREADS = 1;
private static final Log LOG = LogFactory.getLog(FileInputFormat.class);
private static final double SPLIT_SLOP = 1.1; // 10% slop
@Deprecated
public static enum Counter {
BYTES_READ
}
private static final PathFilter hiddenFileFilter = new PathFilter(){
public boolean accept(Path p){
String name = p.getName();
return !name.startsWith("_") && !name.startsWith(".");
}
};
/**
* Proxy PathFilter that accepts a path only if all filters given in the
* constructor do. Used by the listPaths() to apply the built-in
* hiddenFileFilter together with a user provided one (if any).
*/
private static class MultiPathFilter implements PathFilter {
private List<PathFilter> filters;
public MultiPathFilter(List<PathFilter> filters) {
this.filters = filters;
}
public boolean accept(Path path) {
for (PathFilter filter : filters) {
if (!filter.accept(path)) {
return false;
}
}
return true;
}
}
/**
* @param job
* the job to modify
* @param inputDirRecursive
*/
public static void setInputDirRecursive(Job job,
boolean inputDirRecursive) {
job.getConfiguration().setBoolean(INPUT_DIR_RECURSIVE,
inputDirRecursive);
}
/**
* @param job
* the job to look at.
* @return should the files to be read recursively?
*/
public static boolean getInputDirRecursive(JobContext job) {
return job.getConfiguration().getBoolean(INPUT_DIR_RECURSIVE,
false);
}
/**
* Get the lower bound on split size imposed by the format.
* @return the number of bytes of the minimal split for this format
*/
protected long getFormatMinSplitSize() {
return 1;
}
/**
* Is the given filename splittable? Usually, true, but if the file is
* stream compressed, it will not be.
*
* The default implementation in <code>FileInputFormat</code> always returns
* true. Implementations that may deal with non-splittable files <i>must</i>
* override this method.
*
* <code>FileInputFormat</code> implementations can override this and return
* <code>false</code> to ensure that individual input files are never split-up
* so that {@link Mapper}s process entire files.
*
* @param context the job context
* @param filename the file name to check
* @return is this file splitable?
*/
protected boolean isSplitable(JobContext context, Path filename) {
return true;
}
/**
* Set a PathFilter to be applied to the input paths for the map-reduce job.
* @param job the job to modify
* @param filter the PathFilter class use for filtering the input paths.
*/
public static void setInputPathFilter(Job job,
Class<? extends PathFilter> filter) {
job.getConfiguration().setClass(PATHFILTER_CLASS, filter,
PathFilter.class);
}
/**
* Set the minimum input split size
* @param job the job to modify
* @param size the minimum size
*/
public static void setMinInputSplitSize(Job job,
long size) {
job.getConfiguration().setLong(SPLIT_MINSIZE, size);
}
/**
* Get the minimum split size
* @param job the job
* @return the minimum number of bytes that can be in a split
*/
public static long getMinSplitSize(JobContext job) {
return job.getConfiguration().getLong(SPLIT_MINSIZE, 1L);
}
/**
* Set the maximum split size
* @param job the job to modify
* @param size the maximum split size
*/
public static void setMaxInputSplitSize(Job job,
long size) {
job.getConfiguration().setLong(SPLIT_MAXSIZE, size);
}
/**
* Get the maximum split size.
* @param context the job to look at.
* @return the maximum number of bytes a split can include
*/
public static long getMaxSplitSize(JobContext context) {
return context.getConfiguration().getLong(SPLIT_MAXSIZE,
Long.MAX_VALUE);
}
/**
* Get a PathFilter instance of the filter set for the input paths.
*
* @return the PathFilter instance set for the job, NULL if none has been set.
*/
public static PathFilter getInputPathFilter(JobContext context) {
Configuration conf = context.getConfiguration();
Class<?> filterClass = conf.getClass(PATHFILTER_CLASS, null,
PathFilter.class);
return (filterClass != null) ?
(PathFilter) ReflectionUtils.newInstance(filterClass, conf) : null;
}
/** List input directories.
* Subclasses may override to, e.g., select only files matching a regular
* expression.
*
* @param job the job to list input paths for
* @return array of FileStatus objects
* @throws IOException if zero items.
*/
protected List<FileStatus> listStatus(JobContext job
) throws IOException {
Path[] dirs = getInputPaths(job);
if (dirs.length == 0) {
throw new IOException("No input paths specified in job");
}
// get tokens for all the required FileSystems..
TokenCache.obtainTokensForNamenodes(job.getCredentials(), dirs,
job.getConfiguration());
// Whether we need to recursive look into the directory structure
boolean recursive = getInputDirRecursive(job);
// creates a MultiPathFilter with the hiddenFileFilter and the
// user provided one (if any).
List<PathFilter> filters = new ArrayList<PathFilter>();
filters.add(hiddenFileFilter);
PathFilter jobFilter = getInputPathFilter(job);
if (jobFilter != null) {
filters.add(jobFilter);
}
PathFilter inputFilter = new MultiPathFilter(filters);
List<FileStatus> result = null;
int numThreads = job.getConfiguration().getInt(LIST_STATUS_NUM_THREADS,
DEFAULT_LIST_STATUS_NUM_THREADS);
StopWatch sw = new StopWatch().start();
if (numThreads == 1) {
result = singleThreadedListStatus(job, dirs, inputFilter, recursive);
} else {
Iterable<FileStatus> locatedFiles = null;
try {
LocatedFileStatusFetcher locatedFileStatusFetcher = new LocatedFileStatusFetcher(
job.getConfiguration(), dirs, recursive, inputFilter, true);
locatedFiles = locatedFileStatusFetcher.getFileStatuses();
} catch (InterruptedException e) {
throw new IOException("Interrupted while getting file statuses");
}
result = Lists.newArrayList(locatedFiles);
}
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Time taken to get FileStatuses: "
+ sw.now(TimeUnit.MILLISECONDS));
}
LOG.info("Total input files to process : " + result.size());
return result;
}
private List<FileStatus> singleThreadedListStatus(JobContext job, Path[] dirs,
PathFilter inputFilter, boolean recursive) throws IOException {
List<FileStatus> result = new ArrayList<FileStatus>();
List<IOException> errors = new ArrayList<IOException>();
for (int i=0; i < dirs.length; ++i) {
Path p = dirs[i];
FileSystem fs = p.getFileSystem(job.getConfiguration());
FileStatus[] matches = fs.globStatus(p, inputFilter);
if (matches == null) {
errors.add(new IOException("Input path does not exist: " + p));
} else if (matches.length == 0) {
errors.add(new IOException("Input Pattern " + p + " matches 0 files"));
} else {
for (FileStatus globStat: matches) {
if (globStat.isDirectory()) {
RemoteIterator<LocatedFileStatus> iter =
fs.listLocatedStatus(globStat.getPath());
while (iter.hasNext()) {
LocatedFileStatus stat = iter.next();
if (inputFilter.accept(stat.getPath())) {
if (recursive && stat.isDirectory()) {
addInputPathRecursively(result, fs, stat.getPath(),
inputFilter);
} else {
result.add(stat);
}
}
}
} else {
result.add(globStat);
}
}
}
}
if (!errors.isEmpty()) {
throw new InvalidInputException(errors);
}
return result;
}
/**
* Add files in the input path recursively into the results.
* @param result
* The List to store all files.
* @param fs
* The FileSystem.
* @param path
* The input path.
* @param inputFilter
* The input filter that can be used to filter files/dirs.
* @throws IOException
*/
protected void addInputPathRecursively(List<FileStatus> result,
FileSystem fs, Path path, PathFilter inputFilter)
throws IOException {
RemoteIterator<LocatedFileStatus> iter = fs.listLocatedStatus(path);
while (iter.hasNext()) {
LocatedFileStatus stat = iter.next();
if (inputFilter.accept(stat.getPath())) {
if (stat.isDirectory()) {
addInputPathRecursively(result, fs, stat.getPath(), inputFilter);
} else {
result.add(stat);
}
}
}
}
/**
* A factory that makes the split for this class. It can be overridden
* by sub-classes to make sub-types
*/
protected FileSplit makeSplit(Path file, long start, long length,
String[] hosts) {
return new FileSplit(file, start, length, hosts);
}
/**
* A factory that makes the split for this class. It can be overridden
* by sub-classes to make sub-types
*/
protected FileSplit makeSplit(Path file, long start, long length,
String[] hosts, String[] inMemoryHosts) {
return new FileSplit(file, start, length, hosts, inMemoryHosts);
}
/**
* Generate the list of files and make them into FileSplits.
* @param job the job context
* @throws IOException
*/
public List<InputSplit> getSplits(JobContext job) throws IOException {
StopWatch sw = new StopWatch().start();
long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
long maxSize = getMaxSplitSize(job);
// generate splits
List<InputSplit> splits = new ArrayList<InputSplit>();
List<FileStatus> files = listStatus(job);
for (FileStatus file: files) {
Path path = file.getPath();
long length = file.getLen();
if (length != 0) {
BlockLocation[] blkLocations;
if (file instanceof LocatedFileStatus) {
blkLocations = ((LocatedFileStatus) file).getBlockLocations();
} else {
FileSystem fs = path.getFileSystem(job.getConfiguration());
blkLocations = fs.getFileBlockLocations(file, 0, length);
}
if (isSplitable(job, path)) {
long blockSize = file.getBlockSize();
long splitSize = computeSplitSize(blockSize, minSize, maxSize);
long bytesRemaining = length;
while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) {
int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
splits.add(makeSplit(path, length-bytesRemaining, splitSize,
blkLocations[blkIndex].getHosts(),
blkLocations[blkIndex].getCachedHosts()));
bytesRemaining -= splitSize;
}
if (bytesRemaining != 0) {
int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
splits.add(makeSplit(path, length-bytesRemaining, bytesRemaining,
blkLocations[blkIndex].getHosts(),
blkLocations[blkIndex].getCachedHosts()));
}
} else { // not splitable
splits.add(makeSplit(path, 0, length, blkLocations[0].getHosts(),
blkLocations[0].getCachedHosts()));
}
} else {
//Create empty hosts array for zero length files
splits.add(makeSplit(path, 0, length, new String[0]));
}
}
// Save the number of input files for metrics/loadgen
job.getConfiguration().setLong(NUM_INPUT_FILES, files.size());
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Total # of splits generated by getSplits: " + splits.size()
+ ", TimeTaken: " + sw.now(TimeUnit.MILLISECONDS));
}
return splits;
}
protected long computeSplitSize(long blockSize, long minSize,
long maxSize) {
return Math.max(minSize, Math.min(maxSize, blockSize));
}
protected int getBlockIndex(BlockLocation[] blkLocations,
long offset) {
for (int i = 0 ; i < blkLocations.length; i++) {
// is the offset inside this block?
if ((blkLocations[i].getOffset() <= offset) &&
(offset < blkLocations[i].getOffset() + blkLocations[i].getLength())){
return i;
}
}
BlockLocation last = blkLocations[blkLocations.length -1];
long fileLength = last.getOffset() + last.getLength() -1;
throw new IllegalArgumentException("Offset " + offset +
" is outside of file (0.." +
fileLength + ")");
}
/**
* Sets the given comma separated paths as the list of inputs
* for the map-reduce job.
*
* @param job the job
* @param commaSeparatedPaths Comma separated paths to be set as
* the list of inputs for the map-reduce job.
*/
public static void setInputPaths(Job job,
String commaSeparatedPaths
) throws IOException {
setInputPaths(job, StringUtils.stringToPath(
getPathStrings(commaSeparatedPaths)));
}
/**
* Add the given comma separated paths to the list of inputs for
* the map-reduce job.
*
* @param job The job to modify
* @param commaSeparatedPaths Comma separated paths to be added to
* the list of inputs for the map-reduce job.
*/
public static void addInputPaths(Job job,
String commaSeparatedPaths
) throws IOException {
for (String str : getPathStrings(commaSeparatedPaths)) {
addInputPath(job, new Path(str));
}
}
/**
* Set the array of {@link Path}s as the list of inputs
* for the map-reduce job.
*
* @param job The job to modify
* @param inputPaths the {@link Path}s of the input directories/files
* for the map-reduce job.
*/
public static void setInputPaths(Job job,
Path... inputPaths) throws IOException {
Configuration conf = job.getConfiguration();
Path path = inputPaths[0].getFileSystem(conf).makeQualified(inputPaths[0]);
StringBuffer str = new StringBuffer(StringUtils.escapeString(path.toString()));
for(int i = 1; i < inputPaths.length;i++) {
str.append(StringUtils.COMMA_STR);
path = inputPaths[i].getFileSystem(conf).makeQualified(inputPaths[i]);
str.append(StringUtils.escapeString(path.toString()));
}
conf.set(INPUT_DIR, str.toString());
}
/**
* Add a {@link Path} to the list of inputs for the map-reduce job.
*
* @param job The {@link Job} to modify
* @param path {@link Path} to be added to the list of inputs for
* the map-reduce job.
*/
public static void addInputPath(Job job,
Path path) throws IOException {
Configuration conf = job.getConfiguration();
path = path.getFileSystem(conf).makeQualified(path);
String dirStr = StringUtils.escapeString(path.toString());
String dirs = conf.get(INPUT_DIR);
conf.set(INPUT_DIR, dirs == null ? dirStr : dirs + "," + dirStr);
}
// This method escapes commas in the glob pattern of the given paths.
private static String[] getPathStrings(String commaSeparatedPaths) {
int length = commaSeparatedPaths.length();
int curlyOpen = 0;
int pathStart = 0;
boolean globPattern = false;
List<String> pathStrings = new ArrayList<String>();
for (int i=0; i<length; i++) {
char ch = commaSeparatedPaths.charAt(i);
switch(ch) {
case '{' : {
curlyOpen++;
if (!globPattern) {
globPattern = true;
}
break;
}
case '}' : {
curlyOpen--;
if (curlyOpen == 0 && globPattern) {
globPattern = false;
}
break;
}
case ',' : {
if (!globPattern) {
pathStrings.add(commaSeparatedPaths.substring(pathStart, i));
pathStart = i + 1 ;
}
break;
}
default:
continue; // nothing special to do for this character
}
}
pathStrings.add(commaSeparatedPaths.substring(pathStart, length));
return pathStrings.toArray(new String[0]);
}
/**
* Get the list of input {@link Path}s for the map-reduce job.
*
* @param context The job
* @return the list of input {@link Path}s for the map-reduce job.
*/
public static Path[] getInputPaths(JobContext context) {
String dirs = context.getConfiguration().get(INPUT_DIR, "");
String [] list = StringUtils.split(dirs);
Path[] result = new Path[list.length];
for (int i = 0; i < list.length; i++) {
result[i] = new Path(StringUtils.unEscapeString(list[i]));
}
return result;
}
}
| 21,105 | 34.772881 | 89 |
java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.