repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/BinaryPartitioner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.BinaryComparable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Partitioner;
/**
* Partition {@link BinaryComparable} keys using a configurable part of
* the byte array returned by {@link BinaryComparable#getBytes()}.
*
* @see org.apache.hadoop.mapreduce.lib.partition.BinaryPartitioner
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class BinaryPartitioner<V>
extends org.apache.hadoop.mapreduce.lib.partition.BinaryPartitioner<V>
implements Partitioner<BinaryComparable, V> {
public void configure(JobConf job) {
super.setConf(job);
}
}
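// --- Illustrative usage sketch (not part of the original file) ---
// A minimal, hypothetical driver fragment showing how this partitioner might be
// wired into an old-API job. It assumes the static setOffsets(Configuration, int, int)
// helper inherited from the new-API BinaryPartitioner; the offsets below are arbitrary.
class BinaryPartitionerUsageSketch {
  static void configure(JobConf conf) {
    // partition on a sub-range of the serialized key bytes
    conf.setPartitionerClass(BinaryPartitioner.class);
    // e.g. compare only bytes [1..3] of each BinaryComparable key
    org.apache.hadoop.mapreduce.lib.partition.BinaryPartitioner.setOffsets(conf, 1, 3);
  }
}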
| 1,606 | 35.522727 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleOutputs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.Progressable;
import java.io.IOException;
import java.util.*;
/**
* The MultipleOutputs class simplifies writing to additional outputs other
* than the job default output via the <code>OutputCollector</code> passed to
* the <code>map()</code> and <code>reduce()</code> methods of the
* <code>Mapper</code> and <code>Reducer</code> implementations.
* <p>
* Each additional output, or named output, may be configured with its own
* <code>OutputFormat</code>, with its own key class and with its own value
* class.
* <p>
* A named output can be a single file or a multi file. The latter is referred
* to as a multi named output.
* <p>
* A multi named output is an unbounded set of files, all sharing the same
* <code>OutputFormat</code>, key class and value class configuration.
* <p>
* When named outputs are used within a <code>Mapper</code> implementation,
* key/values written to a named output are not part of the reduce phase; only
* key/values written to the job <code>OutputCollector</code> are part of the
* reduce phase.
* <p>
* MultipleOutputs supports counters; by default they are disabled. The counters
* group is the {@link MultipleOutputs} class name.
* </p>
* The names of the counters are the same as the named outputs. For multi
* named outputs the name of the counter is the concatenation of the named
* output, an underscore '_', and the multiname.
* <p>
* Job configuration usage pattern is:
* <pre>
*
* JobConf conf = new JobConf();
*
* conf.setInputPath(inDir);
* FileOutputFormat.setOutputPath(conf, outDir);
*
* conf.setMapperClass(MOMap.class);
* conf.setReducerClass(MOReduce.class);
* ...
*
* // Defines additional single text based output 'text' for the job
* MultipleOutputs.addNamedOutput(conf, "text", TextOutputFormat.class,
* LongWritable.class, Text.class);
*
* // Defines additional multi sequencefile based output 'sequence' for the
* // job
* MultipleOutputs.addMultiNamedOutput(conf, "seq",
* SequenceFileOutputFormat.class,
* LongWritable.class, Text.class);
* ...
*
* JobClient jc = new JobClient();
* RunningJob job = jc.submitJob(conf);
*
* ...
* </pre>
* <p>
* Usage in a Reducer implementation is:
* <pre>
*
* public class MOReduce implements
* Reducer<WritableComparable, Writable> {
* private MultipleOutputs mos;
*
* public void configure(JobConf conf) {
* ...
* mos = new MultipleOutputs(conf);
* }
*
* public void reduce(WritableComparable key, Iterator<Writable> values,
* OutputCollector output, Reporter reporter)
* throws IOException {
* ...
* mos.getCollector("text", reporter).collect(key, new Text("Hello"));
* mos.getCollector("seq", "A", reporter).collect(key, new Text("Bye"));
* mos.getCollector("seq", "B", reporter).collect(key, new Text("Chau"));
* ...
* }
*
* public void close() throws IOException {
* mos.close();
* ...
* }
*
* }
* </pre>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MultipleOutputs {
private static final String NAMED_OUTPUTS = "mo.namedOutputs";
private static final String MO_PREFIX = "mo.namedOutput.";
private static final String FORMAT = ".format";
private static final String KEY = ".key";
private static final String VALUE = ".value";
private static final String MULTI = ".multi";
private static final String COUNTERS_ENABLED = "mo.counters";
/**
* Counters group used by the counters of MultipleOutputs.
*/
private static final String COUNTERS_GROUP = MultipleOutputs.class.getName();
/**
* Checks whether a named output is already defined or not.
*
* @param conf job conf
* @param namedOutput named output name
* @param alreadyDefined if TRUE, fail when the named output is already
* defined; if FALSE, fail when it is not defined
* @throws IllegalArgumentException if the check fails for the given
* 'alreadyDefined' mode
*/
private static void checkNamedOutput(JobConf conf, String namedOutput,
boolean alreadyDefined) {
List<String> definedChannels = getNamedOutputsList(conf);
if (alreadyDefined && definedChannels.contains(namedOutput)) {
throw new IllegalArgumentException("Named output '" + namedOutput +
"' already alreadyDefined");
} else if (!alreadyDefined && !definedChannels.contains(namedOutput)) {
throw new IllegalArgumentException("Named output '" + namedOutput +
"' not defined");
}
}
/**
* Checks if a named output name is a valid token.
*
* @param namedOutput named output Name
* @throws IllegalArgumentException if the output name is not valid.
*/
private static void checkTokenName(String namedOutput) {
if (namedOutput == null || namedOutput.length() == 0) {
throw new IllegalArgumentException(
"Name cannot be NULL or emtpy");
}
for (char ch : namedOutput.toCharArray()) {
if ((ch >= 'A') && (ch <= 'Z')) {
continue;
}
if ((ch >= 'a') && (ch <= 'z')) {
continue;
}
if ((ch >= '0') && (ch <= '9')) {
continue;
}
throw new IllegalArgumentException(
"Name cannot be have a '" + ch + "' char");
}
}
/**
* Checks if a named output name is valid.
*
* @param namedOutput named output Name
* @throws IllegalArgumentException if the output name is not valid.
*/
private static void checkNamedOutputName(String namedOutput) {
checkTokenName(namedOutput);
// name cannot be the name used for the default output
if (namedOutput.equals("part")) {
throw new IllegalArgumentException(
"Named output name cannot be 'part'");
}
}
/**
* Returns list of channel names.
*
* @param conf job conf
* @return List of channel Names
*/
public static List<String> getNamedOutputsList(JobConf conf) {
List<String> names = new ArrayList<String>();
StringTokenizer st = new StringTokenizer(conf.get(NAMED_OUTPUTS, ""), " ");
while (st.hasMoreTokens()) {
names.add(st.nextToken());
}
return names;
}
/**
* Returns if a named output is multiple.
*
* @param conf job conf
* @param namedOutput named output
* @return <code>true</code> if the named output is multi, <code>false</code>
* if it is single
* @throws IllegalArgumentException if the named output is not defined
*/
public static boolean isMultiNamedOutput(JobConf conf, String namedOutput) {
checkNamedOutput(conf, namedOutput, false);
return conf.getBoolean(MO_PREFIX + namedOutput + MULTI, false);
}
/**
* Returns the named output OutputFormat.
*
* @param conf job conf
* @param namedOutput named output
* @return namedOutput OutputFormat
*/
public static Class<? extends OutputFormat> getNamedOutputFormatClass(
JobConf conf, String namedOutput) {
checkNamedOutput(conf, namedOutput, false);
return conf.getClass(MO_PREFIX + namedOutput + FORMAT, null,
OutputFormat.class);
}
/**
* Returns the key class for a named output.
*
* @param conf job conf
* @param namedOutput named output
* @return class for the named output key
*/
public static Class<?> getNamedOutputKeyClass(JobConf conf,
String namedOutput) {
checkNamedOutput(conf, namedOutput, false);
return conf.getClass(MO_PREFIX + namedOutput + KEY, null,
Object.class);
}
/**
* Returns the value class for a named output.
*
* @param conf job conf
* @param namedOutput named output
* @return class of named output value
*/
public static Class<?> getNamedOutputValueClass(JobConf conf,
String namedOutput) {
checkNamedOutput(conf, namedOutput, false);
return conf.getClass(MO_PREFIX + namedOutput + VALUE, null,
Object.class);
}
/**
* Adds a named output for the job.
*
* @param conf job conf to add the named output
* @param namedOutput named output name; it has to be a word, letters
* and numbers only, and cannot be the word 'part' as
* that is reserved for the
* default output.
* @param outputFormatClass OutputFormat class.
* @param keyClass key class
* @param valueClass value class
*/
public static void addNamedOutput(JobConf conf, String namedOutput,
Class<? extends OutputFormat> outputFormatClass,
Class<?> keyClass, Class<?> valueClass) {
addNamedOutput(conf, namedOutput, false, outputFormatClass, keyClass,
valueClass);
}
/**
* Adds a multi named output for the job.
*
* @param conf job conf to add the named output
* @param namedOutput named output name; it has to be a word, letters
* and numbers only, and cannot be the word 'part' as
* that is reserved for the
* default output.
* @param outputFormatClass OutputFormat class.
* @param keyClass key class
* @param valueClass value class
*/
public static void addMultiNamedOutput(JobConf conf, String namedOutput,
Class<? extends OutputFormat> outputFormatClass,
Class<?> keyClass, Class<?> valueClass) {
addNamedOutput(conf, namedOutput, true, outputFormatClass, keyClass,
valueClass);
}
/**
* Adds a named output for the job.
*
* @param conf job conf to add the named output
* @param namedOutput named output name; it has to be a word, letters
* and numbers only, and cannot be the word 'part' as
* that is reserved for the
* default output.
* @param multi indicates if the named output is multi
* @param outputFormatClass OutputFormat class.
* @param keyClass key class
* @param valueClass value class
*/
private static void addNamedOutput(JobConf conf, String namedOutput,
boolean multi,
Class<? extends OutputFormat> outputFormatClass,
Class<?> keyClass, Class<?> valueClass) {
checkNamedOutputName(namedOutput);
checkNamedOutput(conf, namedOutput, true);
conf.set(NAMED_OUTPUTS, conf.get(NAMED_OUTPUTS, "") + " " + namedOutput);
conf.setClass(MO_PREFIX + namedOutput + FORMAT, outputFormatClass,
OutputFormat.class);
conf.setClass(MO_PREFIX + namedOutput + KEY, keyClass, Object.class);
conf.setClass(MO_PREFIX + namedOutput + VALUE, valueClass, Object.class);
conf.setBoolean(MO_PREFIX + namedOutput + MULTI, multi);
}
/**
* Enables or disables counters for the named outputs.
* <p>
* By default these counters are disabled.
* <p>
* MultipleOutputs supports counters; by default they are disabled.
* The counters group is the {@link MultipleOutputs} class name.
* </p>
* The names of the counters are the same as the named outputs. For multi
* named outputs the name of the counter is the concatenation of the named
* output, and underscore '_' and the multiname.
*
* @param conf job conf in which to enable or disable the counters.
* @param enabled indicates if the counters will be enabled or not.
*/
public static void setCountersEnabled(JobConf conf, boolean enabled) {
conf.setBoolean(COUNTERS_ENABLED, enabled);
}
/**
* Returns if the counters for the named outputs are enabled or not.
* <p>
* By default these counters are disabled.
* <p>
* MultipleOutputs supports counters; by default they are disabled.
* The counters group is the {@link MultipleOutputs} class name.
* </p>
* The names of the counters are the same as the named outputs. For multi
* named outputs the name of the counter is the concatenation of the named
* output, and underscore '_' and the multiname.
*
*
* @param conf job conf to check if the counters are enabled.
* @return TRUE if the counters are enabled, FALSE if they are disabled.
*/
public static boolean getCountersEnabled(JobConf conf) {
return conf.getBoolean(COUNTERS_ENABLED, false);
}
// instance code, to be used from Mapper/Reducer code
private JobConf conf;
private OutputFormat outputFormat;
private Set<String> namedOutputs;
private Map<String, RecordWriter> recordWriters;
private boolean countersEnabled;
/**
* Creates and initializes multiple named outputs support; it should be
* instantiated in the Mapper/Reducer configure method.
*
* @param job the job configuration object
*/
public MultipleOutputs(JobConf job) {
this.conf = job;
outputFormat = new InternalFileOutputFormat();
namedOutputs = Collections.unmodifiableSet(
new HashSet<String>(MultipleOutputs.getNamedOutputsList(job)));
recordWriters = new HashMap<String, RecordWriter>();
countersEnabled = getCountersEnabled(job);
}
/**
* Returns iterator with the defined name outputs.
*
* @return iterator with the defined named outputs
*/
public Iterator<String> getNamedOutputs() {
return namedOutputs.iterator();
}
// by being synchronized MultipleOutputs can be used with a
// MultithreadedMapRunner.
private synchronized RecordWriter getRecordWriter(String namedOutput,
String baseFileName,
final Reporter reporter)
throws IOException {
RecordWriter writer = recordWriters.get(baseFileName);
if (writer == null) {
if (countersEnabled && reporter == null) {
throw new IllegalArgumentException(
"Counters are enabled, Reporter cannot be NULL");
}
JobConf jobConf = new JobConf(conf);
jobConf.set(InternalFileOutputFormat.CONFIG_NAMED_OUTPUT, namedOutput);
FileSystem fs = FileSystem.get(conf);
writer =
outputFormat.getRecordWriter(fs, jobConf, baseFileName, reporter);
if (countersEnabled) {
if (reporter == null) {
throw new IllegalArgumentException(
"Counters are enabled, Reporter cannot be NULL");
}
writer = new RecordWriterWithCounter(writer, baseFileName, reporter);
}
recordWriters.put(baseFileName, writer);
}
return writer;
}
private static class RecordWriterWithCounter implements RecordWriter {
private RecordWriter writer;
private String counterName;
private Reporter reporter;
public RecordWriterWithCounter(RecordWriter writer, String counterName,
Reporter reporter) {
this.writer = writer;
this.counterName = counterName;
this.reporter = reporter;
}
@SuppressWarnings({"unchecked"})
public void write(Object key, Object value) throws IOException {
reporter.incrCounter(COUNTERS_GROUP, counterName, 1);
writer.write(key, value);
}
public void close(Reporter reporter) throws IOException {
writer.close(reporter);
}
}
/**
* Gets the output collector for a named output.
*
* @param namedOutput the named output name
* @param reporter the reporter
* @return the output collector for the given named output
* @throws IOException thrown if output collector could not be created
*/
@SuppressWarnings({"unchecked"})
public OutputCollector getCollector(String namedOutput, Reporter reporter)
throws IOException {
return getCollector(namedOutput, null, reporter);
}
/**
* Gets the output collector for a multi named output.
*
* @param namedOutput the named output name
* @param multiName the multi name part
* @param reporter the reporter
* @return the output collector for the given named output
* @throws IOException thrown if output collector could not be created
*/
@SuppressWarnings({"unchecked"})
public OutputCollector getCollector(String namedOutput, String multiName,
Reporter reporter)
throws IOException {
checkNamedOutputName(namedOutput);
if (!namedOutputs.contains(namedOutput)) {
throw new IllegalArgumentException("Undefined named output '" +
namedOutput + "'");
}
boolean multi = isMultiNamedOutput(conf, namedOutput);
if (!multi && multiName != null) {
throw new IllegalArgumentException("Name output '" + namedOutput +
"' has not been defined as multi");
}
if (multi) {
checkTokenName(multiName);
}
String baseFileName = (multi) ? namedOutput + "_" + multiName : namedOutput;
final RecordWriter writer =
getRecordWriter(namedOutput, baseFileName, reporter);
return new OutputCollector() {
@SuppressWarnings({"unchecked"})
public void collect(Object key, Object value) throws IOException {
writer.write(key, value);
}
};
}
/**
* Closes all the opened named outputs.
* <p>
* If overridden, subclasses must invoke <code>super.close()</code> at the
* end of their <code>close()</code>
*
* @throws java.io.IOException thrown if any of the MultipleOutput files
* could not be closed properly.
*/
public void close() throws IOException {
for (RecordWriter writer : recordWriters.values()) {
writer.close(null);
}
}
private static class InternalFileOutputFormat extends
FileOutputFormat<Object, Object> {
public static final String CONFIG_NAMED_OUTPUT = "mo.config.namedOutput";
@SuppressWarnings({"unchecked"})
public RecordWriter<Object, Object> getRecordWriter(
FileSystem fs, JobConf job, String baseFileName, Progressable progress)
throws IOException {
String nameOutput = job.get(CONFIG_NAMED_OUTPUT, null);
String fileName = getUniqueName(job, baseFileName);
// The following trick leverages the instantiation of a record writer via
// the job conf thus supporting arbitrary output formats.
JobConf outputConf = new JobConf(job);
outputConf.setOutputFormat(getNamedOutputFormatClass(job, nameOutput));
outputConf.setOutputKeyClass(getNamedOutputKeyClass(job, nameOutput));
outputConf.setOutputValueClass(getNamedOutputValueClass(job, nameOutput));
OutputFormat outputFormat = outputConf.getOutputFormat();
return outputFormat.getRecordWriter(fs, outputConf, fileName, progress);
}
}
}
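// --- Illustrative usage sketch (not part of the original file) ---
// A minimal, hypothetical driver fragment showing how the per-named-output counters
// described above could be enabled alongside the named outputs from the javadoc example.
class MultipleOutputsCountersSketch {
  static void configure(JobConf conf) {
    // single named output 'text' and multi named output 'seq', as in the javadoc
    MultipleOutputs.addNamedOutput(conf, "text", TextOutputFormat.class,
        org.apache.hadoop.io.LongWritable.class, org.apache.hadoop.io.Text.class);
    MultipleOutputs.addMultiNamedOutput(conf, "seq", SequenceFileOutputFormat.class,
        org.apache.hadoop.io.LongWritable.class, org.apache.hadoop.io.Text.class);
    // counters are disabled by default; when enabled, the counter group is the
    // MultipleOutputs class name and the counter names are "text" and "seq_<multiname>"
    MultipleOutputs.setCountersEnabled(conf, true);
  }
}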
| 19,876 | 34.431373 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/FieldSelectionMapReduce.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.fieldsel.*;
/**
* This class implements a mapper/reducer class that can be used to perform
* field selections in a manner similar to unix cut. The input data is treated
* as fields separated by a user specified separator (the default value is
* "\t"). The user can specify a list of fields that form the map output keys,
* and a list of fields that form the map output values. If the inputformat is
* TextInputFormat, the mapper will ignore the key to the map function, and the
* fields are from the value only. Otherwise, the fields are the union of those
* from the key and those from the value.
*
* The field separator is under attribute "mapreduce.fieldsel.data.field.separator"
*
* The map output field list spec is under attribute
* "mapreduce.fieldsel.map.output.key.value.fields.spec".
* The value is expected to be like "keyFieldsSpec:valueFieldsSpec"
* key/valueFieldsSpec are comma (,) separated field spec: fieldSpec,fieldSpec,fieldSpec ...
* Each field spec can be a simple number (e.g. 5) specifying a specific field, or a range
* (like 2-5) to specify a range of fields, or an open range (like 3-) specifying all
* the fields starting from field 3. The open range field spec applies to value fields only.
* They have no effect on the key fields.
*
* Here is an example: "4,3,0,1:6,5,1-3,7-". It specifies to use fields 4,3,0 and 1 for keys,
* and use fields 6,5,1,2,3,7 and above for values.
*
* The reduce output field list spec is under attribute
* "mapreduce.fieldsel.reduce.output.key.value.fields.spec".
*
* The reducer extracts output key/value pairs in a similar manner, except that
* the key is never ignored.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FieldSelectionMapReduce<K, V>
implements Mapper<K, V, Text, Text>, Reducer<Text, Text, Text, Text> {
private String mapOutputKeyValueSpec;
private boolean ignoreInputKey;
private String fieldSeparator = "\t";
private List<Integer> mapOutputKeyFieldList = new ArrayList<Integer>();
private List<Integer> mapOutputValueFieldList = new ArrayList<Integer>();
private int allMapValueFieldsFrom = -1;
private String reduceOutputKeyValueSpec;
private List<Integer> reduceOutputKeyFieldList = new ArrayList<Integer>();
private List<Integer> reduceOutputValueFieldList = new ArrayList<Integer>();
private int allReduceValueFieldsFrom = -1;
public static final Log LOG = LogFactory.getLog("FieldSelectionMapReduce");
private String specToString() {
StringBuffer sb = new StringBuffer();
sb.append("fieldSeparator: ").append(fieldSeparator).append("\n");
sb.append("mapOutputKeyValueSpec: ").append(mapOutputKeyValueSpec).append(
"\n");
sb.append("reduceOutputKeyValueSpec: ").append(reduceOutputKeyValueSpec)
.append("\n");
sb.append("allMapValueFieldsFrom: ").append(allMapValueFieldsFrom).append(
"\n");
sb.append("allReduceValueFieldsFrom: ").append(allReduceValueFieldsFrom)
.append("\n");
int i = 0;
sb.append("mapOutputKeyFieldList.length: ").append(
mapOutputKeyFieldList.size()).append("\n");
for (i = 0; i < mapOutputKeyFieldList.size(); i++) {
sb.append("\t").append(mapOutputKeyFieldList.get(i)).append("\n");
}
sb.append("mapOutputValueFieldList.length: ").append(
mapOutputValueFieldList.size()).append("\n");
for (i = 0; i < mapOutputValueFieldList.size(); i++) {
sb.append("\t").append(mapOutputValueFieldList.get(i)).append("\n");
}
sb.append("reduceOutputKeyFieldList.length: ").append(
reduceOutputKeyFieldList.size()).append("\n");
for (i = 0; i < reduceOutputKeyFieldList.size(); i++) {
sb.append("\t").append(reduceOutputKeyFieldList.get(i)).append("\n");
}
sb.append("reduceOutputValueFieldList.length: ").append(
reduceOutputValueFieldList.size()).append("\n");
for (i = 0; i < reduceOutputValueFieldList.size(); i++) {
sb.append("\t").append(reduceOutputValueFieldList.get(i)).append("\n");
}
return sb.toString();
}
/**
* Performs the field selection on the input key/value pair and writes the selected fields to the output.
*/
public void map(K key, V val,
OutputCollector<Text, Text> output, Reporter reporter)
throws IOException {
FieldSelectionHelper helper = new FieldSelectionHelper(
FieldSelectionHelper.emptyText, FieldSelectionHelper.emptyText);
helper.extractOutputKeyValue(key.toString(), val.toString(),
fieldSeparator, mapOutputKeyFieldList, mapOutputValueFieldList,
allMapValueFieldsFrom, ignoreInputKey, true);
output.collect(helper.getKey(), helper.getValue());
}
private void parseOutputKeyValueSpec() {
allMapValueFieldsFrom = FieldSelectionHelper.parseOutputKeyValueSpec(
mapOutputKeyValueSpec, mapOutputKeyFieldList, mapOutputValueFieldList);
allReduceValueFieldsFrom = FieldSelectionHelper.parseOutputKeyValueSpec(
reduceOutputKeyValueSpec, reduceOutputKeyFieldList,
reduceOutputValueFieldList);
}
public void configure(JobConf job) {
this.fieldSeparator = job.get(FieldSelectionHelper.DATA_FIELD_SEPERATOR,
"\t");
this.mapOutputKeyValueSpec = job.get(
FieldSelectionHelper.MAP_OUTPUT_KEY_VALUE_SPEC, "0-:");
this.ignoreInputKey = TextInputFormat.class.getCanonicalName().equals(
job.getInputFormat().getClass().getCanonicalName());
this.reduceOutputKeyValueSpec = job.get(
FieldSelectionHelper.REDUCE_OUTPUT_KEY_VALUE_SPEC, "0-:");
parseOutputKeyValueSpec();
LOG.info(specToString());
}
public void close() throws IOException {
// TODO Auto-generated method stub
}
public void reduce(Text key, Iterator<Text> values,
OutputCollector<Text, Text> output, Reporter reporter)
throws IOException {
String keyStr = key.toString() + this.fieldSeparator;
while (values.hasNext()) {
FieldSelectionHelper helper = new FieldSelectionHelper();
helper.extractOutputKeyValue(keyStr, values.next().toString(),
fieldSeparator, reduceOutputKeyFieldList,
reduceOutputValueFieldList, allReduceValueFieldsFrom, false, false);
output.collect(helper.getKey(), helper.getValue());
}
}
}
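// --- Illustrative usage sketch (not part of the original file) ---
// A minimal, hypothetical driver fragment wiring up the field-selection specs
// described in the class javadoc. The property names and the example spec
// "4,3,0,1:6,5,1-3,7-" are taken from the javadoc above; everything else is illustrative.
class FieldSelectionUsageSketch {
  static void configure(JobConf job) {
    job.setMapperClass(FieldSelectionMapReduce.class);
    job.setReducerClass(FieldSelectionMapReduce.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    // keys from fields 4,3,0,1; values from fields 6,5,1-3 and every field from 7 on
    job.set("mapreduce.fieldsel.map.output.key.value.fields.spec", "4,3,0,1:6,5,1-3,7-");
    // reduce-side spec; "0-:" matches the default used in configure() above
    job.set("mapreduce.fieldsel.reduce.output.key.value.fields.spec", "0-:");
  }
}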
| 7,767 | 39.458333 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/KeyFieldBasedComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;
import org.apache.hadoop.mapreduce.JobContext;
/**
* This comparator implementation provides a subset of the features provided
* by the Unix/GNU Sort. In particular, the supported features are:
* -n, (Sort numerically)
* -r, (Reverse the result of comparison)
* -k pos1[,pos2], where pos is of the form f[.c][opts], where f is the number
* of the field to use, and c is the number of the first character from the
* beginning of the field. Fields and character positions are numbered starting
* with 1; a character position of zero in pos2 indicates the field's last
* character. If '.c' is omitted from pos1, it defaults to 1 (the beginning
* of the field); if omitted from pos2, it defaults to 0 (the end of the
* field). opts are ordering options (any of 'nr' as described above).
* We assume that the fields in the key are separated by
* {@link JobContext#MAP_OUTPUT_KEY_FIELD_SEPERATOR}
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class KeyFieldBasedComparator<K, V> extends
org.apache.hadoop.mapreduce.lib.partition.KeyFieldBasedComparator<K, V>
implements JobConfigurable {
public void configure(JobConf job) {
super.setConf(job);
}
}
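// --- Illustrative usage sketch (not part of the original file) ---
// A minimal, hypothetical driver fragment applying the sort options described above.
// It assumes JobConf.setKeyFieldComparatorOptions(String), which stores the
// "-k/-n/-r" spec this comparator reads; the separator property name is an assumption.
class KeyFieldBasedComparatorUsageSketch {
  static void configure(JobConf conf) {
    conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
    // sort on the second field only, numerically and in reverse order
    conf.setKeyFieldComparatorOptions("-k2,2nr");
    // key fields are separated by this property (JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR)
    conf.set("mapreduce.map.output.key.field.separator", "\t");
  }
}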
| 2,253 | 42.346154 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/CombineSequenceFileInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
/**
* Input format that is a <code>CombineFileInputFormat</code>-equivalent for
* <code>SequenceFileInputFormat</code>.
*
* @see CombineFileInputFormat
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class CombineSequenceFileInputFormat<K,V>
extends CombineFileInputFormat<K,V> {
@SuppressWarnings({ "rawtypes", "unchecked" })
public RecordReader<K,V> getRecordReader(InputSplit split, JobConf conf,
Reporter reporter) throws IOException {
return new CombineFileRecordReader(conf, (CombineFileSplit)split, reporter,
SequenceFileRecordReaderWrapper.class);
}
/**
* A record reader that may be passed to <code>CombineFileRecordReader</code>
* so that it can be used in a <code>CombineFileInputFormat</code>-equivalent
* for <code>SequenceFileInputFormat</code>.
*
* @see CombineFileRecordReader
* @see CombineFileInputFormat
* @see SequenceFileInputFormat
*/
private static class SequenceFileRecordReaderWrapper<K,V>
extends CombineFileRecordReaderWrapper<K,V> {
// this constructor signature is required by CombineFileRecordReader
public SequenceFileRecordReaderWrapper(CombineFileSplit split,
Configuration conf, Reporter reporter, Integer idx) throws IOException {
super(new SequenceFileInputFormat<K,V>(), split, conf, reporter, idx);
}
}
}
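// --- Illustrative usage sketch (not part of the original file) ---
// A minimal, hypothetical driver fragment: many small sequence files are packed into
// combined splits so each map task processes several of them. The split-size property
// name used here is an assumption of this sketch.
class CombineSequenceFileUsageSketch {
  static void configure(JobConf conf) {
    conf.setInputFormat(CombineSequenceFileInputFormat.class);
    // cap each combined split at 256 MB so a single map task does not grow unbounded
    conf.setLong("mapreduce.input.fileinputformat.split.maxsize", 256L * 1024 * 1024);
  }
}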
| 2,615 | 38.044776 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/LongSumReducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.LongWritable;
/**
* A {@link Reducer} that sums long values.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class LongSumReducer<K> extends MapReduceBase
implements Reducer<K, LongWritable, K, LongWritable> {
public void reduce(K key, Iterator<LongWritable> values,
OutputCollector<K, LongWritable> output,
Reporter reporter)
throws IOException {
// sum all values for this key
long sum = 0;
while (values.hasNext()) {
sum += values.next().get();
}
// output sum
output.collect(key, new LongWritable(sum));
}
}
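// --- Illustrative usage sketch (not part of the original file) ---
// A minimal, hypothetical driver fragment. Summing longs is associative and
// commutative, so the same class can safely act as both combiner and reducer.
class LongSumReducerUsageSketch {
  static void configure(org.apache.hadoop.mapred.JobConf conf) {
    conf.setOutputKeyClass(org.apache.hadoop.io.Text.class); // illustrative key type
    conf.setOutputValueClass(LongWritable.class);
    conf.setCombinerClass(LongSumReducer.class);
    conf.setReducerClass(LongSumReducer.class);
  }
}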
| 1,850 | 31.473684 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/Chain.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.Serialization;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.GenericsUtil;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
* The Chain class provides all the common functionality for the
* {@link ChainMapper} and the {@link ChainReducer} classes.
*/
class Chain extends org.apache.hadoop.mapreduce.lib.chain.Chain {
private static final String MAPPER_BY_VALUE = "chain.mapper.byValue";
private static final String REDUCER_BY_VALUE = "chain.reducer.byValue";
private JobConf chainJobConf;
private List<Mapper> mappers = new ArrayList<Mapper>();
private Reducer reducer;
// to cache the key/value output class serializations for each chain element
// to avoid looking them up every time.
private List<Serialization> mappersKeySerialization =
new ArrayList<Serialization>();
private List<Serialization> mappersValueSerialization =
new ArrayList<Serialization>();
private Serialization reducerKeySerialization;
private Serialization reducerValueSerialization;
/**
* Creates a Chain instance configured for a Mapper or a Reducer.
*
* @param isMap TRUE indicates the chain is for a Mapper, FALSE that it is for a
* Reducer.
*/
Chain(boolean isMap) {
super(isMap);
}
/**
* Adds a Mapper class to the chain job's JobConf.
* <p/>
* The configuration properties of the chain job have precedence over the
* configuration properties of the Mapper.
*
* @param isMap indicates if the Chain is for a Mapper or for a
* Reducer.
* @param jobConf chain job's JobConf to add the Mapper class.
* @param klass the Mapper class to add.
* @param inputKeyClass mapper input key class.
* @param inputValueClass mapper input value class.
* @param outputKeyClass mapper output key class.
* @param outputValueClass mapper output value class.
* @param byValue indicates if key/values should be passed by value
* to the next Mapper in the chain, if any.
* @param mapperConf a JobConf with the configuration for the Mapper
* class. It is recommended to use a JobConf without default values using the
* <code>JobConf(boolean loadDefaults)</code> constructor with FALSE.
*/
public static <K1, V1, K2, V2> void addMapper(boolean isMap, JobConf jobConf,
Class<? extends Mapper<K1, V1, K2, V2>> klass,
Class<? extends K1> inputKeyClass,
Class<? extends V1> inputValueClass,
Class<? extends K2> outputKeyClass,
Class<? extends V2> outputValueClass,
boolean byValue, JobConf mapperConf) {
String prefix = getPrefix(isMap);
// if it is a reducer chain, check whether the Reducer has already been set
checkReducerAlreadySet(isMap, jobConf, prefix, true);
// set the mapper class
int index = getIndex(jobConf, prefix);
jobConf.setClass(prefix + CHAIN_MAPPER_CLASS + index, klass, Mapper.class);
validateKeyValueTypes(isMap, jobConf, inputKeyClass, inputValueClass,
outputKeyClass, outputValueClass, index, prefix);
// if the Mapper does not have a private JobConf create an empty one
if (mapperConf == null) {
// using a JobConf without defaults to make it lightweight.
// still the chain JobConf may have all defaults and this conf is
// overlapped to the chain JobConf one.
mapperConf = new JobConf(false);
}
// store in the private mapper conf if it works by value or by reference
mapperConf.setBoolean(MAPPER_BY_VALUE, byValue);
setMapperConf(isMap, jobConf, inputKeyClass, inputValueClass,
outputKeyClass, outputValueClass, mapperConf, index, prefix);
}
/**
* Sets the Reducer class to the chain job's JobConf.
* <p/>
* The configuration properties of the chain job have precedence over the
* configuration properties of the Reducer.
*
* @param jobConf chain job's JobConf to add the Reducer class.
* @param klass the Reducer class to add.
* @param inputKeyClass reducer input key class.
* @param inputValueClass reducer input value class.
* @param outputKeyClass reducer output key class.
* @param outputValueClass reducer output value class.
* @param byValue indicates if key/values should be passed by value
* to the next Mapper in the chain, if any.
* @param reducerConf a JobConf with the configuration for the Reducer
* class. It is recommended to use a JobConf without default values using the
* <code>JobConf(boolean loadDefaults)</code> constructor with FALSE.
*/
public static <K1, V1, K2, V2> void setReducer(JobConf jobConf,
Class<? extends Reducer<K1, V1, K2, V2>> klass,
Class<? extends K1> inputKeyClass,
Class<? extends V1> inputValueClass,
Class<? extends K2> outputKeyClass,
Class<? extends V2> outputValueClass,
boolean byValue, JobConf reducerConf) {
String prefix = getPrefix(false);
checkReducerAlreadySet(false, jobConf, prefix, false);
jobConf.setClass(prefix + CHAIN_REDUCER_CLASS, klass, Reducer.class);
// if the Reducer does not have a private JobConf create an empty one
if (reducerConf == null) {
// using a JobConf without defaults to make it lightweight.
// still the chain JobConf may have all defaults and this conf is
// overlapped to the chain JobConf one.
reducerConf = new JobConf(false);
}
// store in the private reducer conf the input/output classes of the reducer
// and if it works by value or by reference
reducerConf.setBoolean(REDUCER_BY_VALUE, byValue);
setReducerConf(jobConf, inputKeyClass, inputValueClass, outputKeyClass,
outputValueClass, reducerConf, prefix);
}
/**
* Configures all the chain elements for the task.
*
* @param jobConf chain job's JobConf.
*/
public void configure(JobConf jobConf) {
String prefix = getPrefix(isMap);
chainJobConf = jobConf;
SerializationFactory serializationFactory =
new SerializationFactory(chainJobConf);
int index = jobConf.getInt(prefix + CHAIN_MAPPER_SIZE, 0);
for (int i = 0; i < index; i++) {
Class<? extends Mapper> klass =
jobConf.getClass(prefix + CHAIN_MAPPER_CLASS + i, null, Mapper.class);
JobConf mConf = new JobConf(
getChainElementConf(jobConf, prefix + CHAIN_MAPPER_CONFIG + i));
Mapper mapper = ReflectionUtils.newInstance(klass, mConf);
mappers.add(mapper);
if (mConf.getBoolean(MAPPER_BY_VALUE, true)) {
mappersKeySerialization.add(serializationFactory.getSerialization(
mConf.getClass(MAPPER_OUTPUT_KEY_CLASS, null)));
mappersValueSerialization.add(serializationFactory.getSerialization(
mConf.getClass(MAPPER_OUTPUT_VALUE_CLASS, null)));
} else {
mappersKeySerialization.add(null);
mappersValueSerialization.add(null);
}
}
Class<? extends Reducer> klass =
jobConf.getClass(prefix + CHAIN_REDUCER_CLASS, null, Reducer.class);
if (klass != null) {
JobConf rConf = new JobConf(
getChainElementConf(jobConf, prefix + CHAIN_REDUCER_CONFIG));
reducer = ReflectionUtils.newInstance(klass, rConf);
if (rConf.getBoolean(REDUCER_BY_VALUE, true)) {
reducerKeySerialization = serializationFactory
.getSerialization(rConf.getClass(REDUCER_OUTPUT_KEY_CLASS, null));
reducerValueSerialization = serializationFactory
.getSerialization(rConf.getClass(REDUCER_OUTPUT_VALUE_CLASS, null));
} else {
reducerKeySerialization = null;
reducerValueSerialization = null;
}
}
}
/**
* Returns the chain job conf.
*
* @return the chain job conf.
*/
protected JobConf getChainJobConf() {
return chainJobConf;
}
/**
* Returns the first Mapper instance in the chain.
*
* @return the first Mapper instance in the chain or NULL if none.
*/
public Mapper getFirstMap() {
return (mappers.size() > 0) ? mappers.get(0) : null;
}
/**
* Returns the Reducer instance in the chain.
*
* @return the Reducer instance in the chain or NULL if none.
*/
public Reducer getReducer() {
return reducer;
}
/**
* Returns the OutputCollector to be used by a Mapper instance in the chain.
*
* @param mapperIndex index of the Mapper instance to get the OutputCollector.
* @param output the original OutputCollector of the task.
* @param reporter the reporter of the task.
* @return the OutputCollector to be used in the chain.
*/
@SuppressWarnings({"unchecked"})
public OutputCollector getMapperCollector(int mapperIndex,
OutputCollector output,
Reporter reporter) {
Serialization keySerialization = mappersKeySerialization.get(mapperIndex);
Serialization valueSerialization =
mappersValueSerialization.get(mapperIndex);
return new ChainOutputCollector(mapperIndex, keySerialization,
valueSerialization, output, reporter);
}
/**
* Returns the OutputCollector to be used by a Mapper instance in the chain.
*
* @param output the original OutputCollector of the task.
* @param reporter the reporter of the task.
* @return the OutputCollector to be used in the chain.
*/
@SuppressWarnings({"unchecked"})
public OutputCollector getReducerCollector(OutputCollector output,
Reporter reporter) {
return new ChainOutputCollector(reducerKeySerialization,
reducerValueSerialization, output,
reporter);
}
/**
* Closes all the chain elements.
*
* @throws IOException thrown if any of the chain elements threw an
* IOException exception.
*/
public void close() throws IOException {
for (Mapper map : mappers) {
map.close();
}
if (reducer != null) {
reducer.close();
}
}
// using a ThreadLocal to reuse the DataOutputBuffer used for ser/deser;
// it has to be a thread local because otherwise it would break if used from a
// MultithreadedMapRunner.
private final ThreadLocal<DataOutputBuffer> threadLocalDataOutputBuffer =
new ThreadLocal<DataOutputBuffer>() {
protected DataOutputBuffer initialValue() {
return new DataOutputBuffer(1024);
}
};
/**
* OutputCollector implementation used by the chain tasks.
* <p/>
* If it is not the end of the chain, a {@link #collect} invocation invokes
* the next Mapper in the chain. If it is the end of the chain the task
* OutputCollector is called.
*/
private class ChainOutputCollector<K, V> implements OutputCollector<K, V> {
private int nextMapperIndex;
private Serialization<K> keySerialization;
private Serialization<V> valueSerialization;
private OutputCollector output;
private Reporter reporter;
/*
* Constructor for Mappers
*/
public ChainOutputCollector(int index, Serialization<K> keySerialization,
Serialization<V> valueSerialization,
OutputCollector output, Reporter reporter) {
this.nextMapperIndex = index + 1;
this.keySerialization = keySerialization;
this.valueSerialization = valueSerialization;
this.output = output;
this.reporter = reporter;
}
/*
* Constructor for Reducer
*/
public ChainOutputCollector(Serialization<K> keySerialization,
Serialization<V> valueSerialization,
OutputCollector output, Reporter reporter) {
this.nextMapperIndex = 0;
this.keySerialization = keySerialization;
this.valueSerialization = valueSerialization;
this.output = output;
this.reporter = reporter;
}
@SuppressWarnings({"unchecked"})
public void collect(K key, V value) throws IOException {
if (nextMapperIndex < mappers.size()) {
// there is a next mapper in chain
// only need to ser/deser if there is next mapper in the chain
if (keySerialization != null) {
key = makeCopyForPassByValue(keySerialization, key);
value = makeCopyForPassByValue(valueSerialization, value);
}
// gets ser/deser and mapper of next in chain
Serialization nextKeySerialization =
mappersKeySerialization.get(nextMapperIndex);
Serialization nextValueSerialization =
mappersValueSerialization.get(nextMapperIndex);
Mapper nextMapper = mappers.get(nextMapperIndex);
// invokes next mapper in chain
nextMapper.map(key, value,
new ChainOutputCollector(nextMapperIndex,
nextKeySerialization,
nextValueSerialization,
output, reporter),
reporter);
} else {
// end of chain, use the real output collector
output.collect(key, value);
}
}
private <E> E makeCopyForPassByValue(Serialization<E> serialization,
E obj) throws IOException {
Serializer<E> ser =
serialization.getSerializer(GenericsUtil.getClass(obj));
Deserializer<E> deser =
serialization.getDeserializer(GenericsUtil.getClass(obj));
DataOutputBuffer dof = threadLocalDataOutputBuffer.get();
dof.reset();
ser.open(dof);
ser.serialize(obj);
ser.close();
obj = ReflectionUtils.newInstance(GenericsUtil.getClass(obj),
getChainJobConf());
ByteArrayInputStream bais =
new ByteArrayInputStream(dof.getData(), 0, dof.getLength());
deser.open(bais);
deser.deserialize(obj);
deser.close();
return obj;
}
}
}
| 15,493 | 38.027708 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/LazyOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputFormat;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
/**
* A convenience class that creates output lazily.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class LazyOutputFormat<K, V> extends FilterOutputFormat<K, V> {
/**
* Set the underlying output format for LazyOutputFormat.
* @param job the {@link JobConf} to modify
* @param theClass the underlying class
*/
@SuppressWarnings("unchecked")
public static void setOutputFormatClass(JobConf job,
Class<? extends OutputFormat> theClass) {
job.setOutputFormat(LazyOutputFormat.class);
job.setClass("mapreduce.output.lazyoutputformat.outputformat", theClass, OutputFormat.class);
}
@Override
public RecordWriter<K, V> getRecordWriter(FileSystem ignored, JobConf job,
String name, Progressable progress) throws IOException {
if (baseOut == null) {
getBaseOutputFormat(job);
}
return new LazyRecordWriter<K, V>(job, baseOut, name, progress);
}
@Override
public void checkOutputSpecs(FileSystem ignored, JobConf job)
throws IOException {
if (baseOut == null) {
getBaseOutputFormat(job);
}
super.checkOutputSpecs(ignored, job);
}
@SuppressWarnings("unchecked")
private void getBaseOutputFormat(JobConf job) throws IOException {
baseOut = ReflectionUtils.newInstance(
job.getClass("mapreduce.output.lazyoutputformat.outputformat", null, OutputFormat.class),
job);
if (baseOut == null) {
throw new IOException("Ouput format not set for LazyOutputFormat");
}
}
/**
* <code>LazyRecordWriter</code> is a convenience
* class that works with LazyOutputFormat.
*/
private static class LazyRecordWriter<K,V> extends FilterRecordWriter<K,V> {
final OutputFormat of;
final String name;
final Progressable progress;
final JobConf job;
public LazyRecordWriter(JobConf job, OutputFormat of, String name,
Progressable progress) throws IOException {
this.of = of;
this.job = job;
this.name = name;
this.progress = progress;
}
@Override
public void close(Reporter reporter) throws IOException {
if (rawWriter != null) {
rawWriter.close(reporter);
}
}
@Override
public void write(K key, V value) throws IOException {
if (rawWriter == null) {
createRecordWriter();
}
super.write(key, value);
}
@SuppressWarnings("unchecked")
private void createRecordWriter() throws IOException {
FileSystem fs = FileSystem.get(job);
rawWriter = of.getRecordWriter(fs, job, name, progress);
}
}
}
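// --- Illustrative usage sketch (not part of the original file) ---
// A minimal, hypothetical driver fragment: the wrapped output format's files are only
// created once the first record is written, so tasks that emit nothing leave no
// empty part files behind.
class LazyOutputFormatUsageSketch {
  static void configure(JobConf job) {
    // sets LazyOutputFormat as the job's output format and records the real one
    LazyOutputFormat.setOutputFormatClass(job,
        org.apache.hadoop.mapred.TextOutputFormat.class);
  }
}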
| 3,895 | 31.198347 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/CombineFileRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.*;
import java.lang.reflect.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/**
* A generic RecordReader that can hand out different recordReaders
* for each chunk in a {@link CombineFileSplit}.
* A CombineFileSplit can combine data chunks from multiple files.
* This class allows using different RecordReaders for processing
* these data chunks from different files.
* @see CombineFileSplit
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class CombineFileRecordReader<K, V> implements RecordReader<K, V> {
static final Class [] constructorSignature = new Class []
{CombineFileSplit.class,
Configuration.class,
Reporter.class,
Integer.class};
protected CombineFileSplit split;
protected JobConf jc;
protected Reporter reporter;
protected Constructor<RecordReader<K, V>> rrConstructor;
protected int idx;
protected long progress;
protected RecordReader<K, V> curReader;
public boolean next(K key, V value) throws IOException {
while ((curReader == null) || !curReader.next(key, value)) {
if (!initNextRecordReader()) {
return false;
}
}
return true;
}
public K createKey() {
return curReader.createKey();
}
public V createValue() {
return curReader.createValue();
}
/**
* return the amount of data processed
*/
public long getPos() throws IOException {
return progress;
}
public void close() throws IOException {
if (curReader != null) {
curReader.close();
curReader = null;
}
}
/**
* return progress based on the amount of data processed so far.
*/
public float getProgress() throws IOException {
return Math.min(1.0f, progress/(float)(split.getLength()));
}
/**
* A generic RecordReader that can hand out different recordReaders
* for each chunk in the CombineFileSplit.
*/
public CombineFileRecordReader(JobConf job, CombineFileSplit split,
Reporter reporter,
Class<RecordReader<K, V>> rrClass)
throws IOException {
this.split = split;
this.jc = job;
this.reporter = reporter;
this.idx = 0;
this.curReader = null;
this.progress = 0;
try {
rrConstructor = rrClass.getDeclaredConstructor(constructorSignature);
rrConstructor.setAccessible(true);
} catch (Exception e) {
throw new RuntimeException(rrClass.getName() +
" does not have valid constructor", e);
}
initNextRecordReader();
}
/**
* Get the record reader for the next chunk in this CombineFileSplit.
*/
protected boolean initNextRecordReader() throws IOException {
if (curReader != null) {
curReader.close();
curReader = null;
if (idx > 0) {
progress += split.getLength(idx-1); // done processing so far
}
}
// if all chunks have been processed, nothing more to do.
if (idx == split.getNumPaths()) {
return false;
}
reporter.progress();
// get a record reader for the idx-th chunk
try {
curReader = rrConstructor.newInstance(new Object []
{split, jc, reporter, Integer.valueOf(idx)});
// setup some helper config variables.
jc.set(JobContext.MAP_INPUT_FILE, split.getPath(idx).toString());
jc.setLong(JobContext.MAP_INPUT_START, split.getOffset(idx));
jc.setLong(JobContext.MAP_INPUT_PATH, split.getLength(idx));
} catch (Exception e) {
throw new RuntimeException (e);
}
idx++;
return true;
}
}
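// --- Illustrative sketch (not part of the original file) ---
// A minimal, hypothetical wrapper demonstrating the constructor signature this class
// requires (CombineFileSplit, Configuration, Reporter, Integer). It mirrors the
// SequenceFileRecordReaderWrapper shown earlier in this listing, but wraps the
// line-oriented TextInputFormat instead; the class name is illustrative.
class TextRecordReaderWrapperSketch extends
    CombineFileRecordReaderWrapper<org.apache.hadoop.io.LongWritable,
        org.apache.hadoop.io.Text> {
  // this constructor signature is what CombineFileRecordReader instantiates reflectively
  public TextRecordReaderWrapperSketch(CombineFileSplit split, Configuration conf,
      Reporter reporter, Integer idx) throws IOException {
    super(new TextInputFormat(), split, conf, reporter, idx);
  }
}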
| 4,769 | 29.576923 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/ChainMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import java.io.IOException;
/**
* The ChainMapper class allows to use multiple Mapper classes within a single
* Map task.
* <p>
* The Mapper classes are invoked in a chained (or piped) fashion: the output of
* the first becomes the input of the second, and so on until the last Mapper,
* the output of the last Mapper will be written to the task's output.
* <p>
* The key functionality of this feature is that the Mappers in the chain do not
* need to be aware that they are executed in a chain. This enables having
* reusable specialized Mappers that can be combined to perform composite
* operations within a single task.
* <p>
 * Special care has to be taken when creating chains that the key/values output
 * by a Mapper are valid for the following Mapper in the chain. It is assumed
 * all Mappers and the Reducer in the chain use matching output and input key
 * and value classes, as no conversion is done by the chaining code.
* <p>
 * Using the ChainMapper and the ChainReducer classes it is possible to compose
 * Map/Reduce jobs that look like <code>[MAP+ / REDUCE MAP*]</code>. An
 * immediate benefit of this pattern is a dramatic reduction in disk IO.
* <p>
* IMPORTANT: There is no need to specify the output key/value classes for the
* ChainMapper, this is done by the addMapper for the last mapper in the chain.
* <p>
* ChainMapper usage pattern:
* <p>
* <pre>
* ...
* conf.setJobName("chain");
* conf.setInputFormat(TextInputFormat.class);
* conf.setOutputFormat(TextOutputFormat.class);
*
* JobConf mapAConf = new JobConf(false);
* ...
* ChainMapper.addMapper(conf, AMap.class, LongWritable.class, Text.class,
* Text.class, Text.class, true, mapAConf);
*
* JobConf mapBConf = new JobConf(false);
* ...
* ChainMapper.addMapper(conf, BMap.class, Text.class, Text.class,
* LongWritable.class, Text.class, false, mapBConf);
*
* JobConf reduceConf = new JobConf(false);
* ...
* ChainReducer.setReducer(conf, XReduce.class, LongWritable.class, Text.class,
* Text.class, Text.class, true, reduceConf);
*
* ChainReducer.addMapper(conf, CMap.class, Text.class, Text.class,
* LongWritable.class, Text.class, false, null);
*
* ChainReducer.addMapper(conf, DMap.class, LongWritable.class, Text.class,
* LongWritable.class, LongWritable.class, true, null);
*
* FileInputFormat.setInputPaths(conf, inDir);
* FileOutputFormat.setOutputPath(conf, outDir);
* ...
*
* JobClient jc = new JobClient(conf);
* RunningJob job = jc.submitJob(conf);
* ...
* </pre>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ChainMapper implements Mapper {
/**
* Adds a Mapper class to the chain job's JobConf.
* <p>
 * It has to be specified how key and values are passed from one element of
 * the chain to the next: by value or by reference. If a Mapper relies on the
 * assumed semantics that the keys and values it emits are not modified by the
 * collector, 'by value' must be used. If the Mapper does not rely on this
 * assumption, 'by reference' can be used as an optimization to avoid
 * serialization and deserialization.
* <p>
* For the added Mapper the configuration given for it,
 * <code>mapperConf</code>, has precedence over the job's JobConf. This
* precedence is in effect when the task is running.
* <p>
* IMPORTANT: There is no need to specify the output key/value classes for the
* ChainMapper, this is done by the addMapper for the last mapper in the chain
* <p>
*
* @param job job's JobConf to add the Mapper class.
* @param klass the Mapper class to add.
* @param inputKeyClass mapper input key class.
* @param inputValueClass mapper input value class.
* @param outputKeyClass mapper output key class.
* @param outputValueClass mapper output value class.
* @param byValue indicates if key/values should be passed by value
* to the next Mapper in the chain, if any.
* @param mapperConf a JobConf with the configuration for the Mapper
* class. It is recommended to use a JobConf without default values using the
* <code>JobConf(boolean loadDefaults)</code> constructor with FALSE.
*/
public static <K1, V1, K2, V2> void addMapper(JobConf job,
Class<? extends Mapper<K1, V1, K2, V2>> klass,
Class<? extends K1> inputKeyClass,
Class<? extends V1> inputValueClass,
Class<? extends K2> outputKeyClass,
Class<? extends V2> outputValueClass,
boolean byValue, JobConf mapperConf) {
job.setMapperClass(ChainMapper.class);
job.setMapOutputKeyClass(outputKeyClass);
job.setMapOutputValueClass(outputValueClass);
Chain.addMapper(true, job, klass, inputKeyClass, inputValueClass,
outputKeyClass, outputValueClass, byValue, mapperConf);
}
private Chain chain;
/**
* Constructor.
*/
public ChainMapper() {
chain = new Chain(true);
}
/**
* Configures the ChainMapper and all the Mappers in the chain.
* <p>
 * If this method is overridden, <code>super.configure(...)</code> should be
 * invoked at the beginning of the overriding method.
*/
public void configure(JobConf job) {
chain.configure(job);
}
/**
* Chains the <code>map(...)</code> methods of the Mappers in the chain.
*/
@SuppressWarnings({"unchecked"})
public void map(Object key, Object value, OutputCollector output,
Reporter reporter) throws IOException {
Mapper mapper = chain.getFirstMap();
if (mapper != null) {
mapper.map(key, value, chain.getMapperCollector(0, output, reporter),
reporter);
}
}
/**
* Closes the ChainMapper and all the Mappers in the chain.
* <p>
 * If this method is overridden, <code>super.close()</code> should be
 * invoked at the end of the overriding method.
*/
public void close() throws IOException {
chain.close();
}
}
| 7,176 | 38.218579 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/IdentityReducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.MapReduceBase;
/**
* Performs no reduction, writing all input values directly to the output.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class IdentityReducer<K, V>
extends MapReduceBase implements Reducer<K, V, K, V> {
/** Writes all keys and values directly to output. */
public void reduce(K key, Iterator<V> values,
OutputCollector<K, V> output, Reporter reporter)
throws IOException {
while (values.hasNext()) {
output.collect(key, values.next());
}
}
}
| 1,722 | 33.46 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/DelegatingInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.ReflectionUtils;
/**
* An {@link InputFormat} that delegates behaviour of paths to multiple other
* InputFormats.
*
* @see MultipleInputs#addInputPath(JobConf, Path, Class, Class)
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class DelegatingInputFormat<K, V> implements InputFormat<K, V> {
public InputSplit[] getSplits(JobConf conf, int numSplits) throws IOException {
JobConf confCopy = new JobConf(conf);
List<InputSplit> splits = new ArrayList<InputSplit>();
Map<Path, InputFormat> formatMap = MultipleInputs.getInputFormatMap(conf);
Map<Path, Class<? extends Mapper>> mapperMap = MultipleInputs
.getMapperTypeMap(conf);
Map<Class<? extends InputFormat>, List<Path>> formatPaths
= new HashMap<Class<? extends InputFormat>, List<Path>>();
// First, build a map of InputFormats to Paths
for (Entry<Path, InputFormat> entry : formatMap.entrySet()) {
if (!formatPaths.containsKey(entry.getValue().getClass())) {
formatPaths.put(entry.getValue().getClass(), new LinkedList<Path>());
}
formatPaths.get(entry.getValue().getClass()).add(entry.getKey());
}
for (Entry<Class<? extends InputFormat>, List<Path>> formatEntry :
formatPaths.entrySet()) {
Class<? extends InputFormat> formatClass = formatEntry.getKey();
InputFormat format = (InputFormat) ReflectionUtils.newInstance(
formatClass, conf);
List<Path> paths = formatEntry.getValue();
Map<Class<? extends Mapper>, List<Path>> mapperPaths
= new HashMap<Class<? extends Mapper>, List<Path>>();
// Now, for each set of paths that have a common InputFormat, build
// a map of Mappers to the paths they're used for
for (Path path : paths) {
Class<? extends Mapper> mapperClass = mapperMap.get(path);
if (!mapperPaths.containsKey(mapperClass)) {
mapperPaths.put(mapperClass, new LinkedList<Path>());
}
mapperPaths.get(mapperClass).add(path);
}
// Now each set of paths that has a common InputFormat and Mapper can
// be added to the same job, and split together.
for (Entry<Class<? extends Mapper>, List<Path>> mapEntry : mapperPaths
.entrySet()) {
paths = mapEntry.getValue();
Class<? extends Mapper> mapperClass = mapEntry.getKey();
if (mapperClass == null) {
mapperClass = conf.getMapperClass();
}
FileInputFormat.setInputPaths(confCopy, paths.toArray(new Path[paths
.size()]));
// Get splits for each input path and tag with InputFormat
// and Mapper types by wrapping in a TaggedInputSplit.
InputSplit[] pathSplits = format.getSplits(confCopy, numSplits);
for (InputSplit pathSplit : pathSplits) {
splits.add(new TaggedInputSplit(pathSplit, conf, format.getClass(),
mapperClass));
}
}
}
return splits.toArray(new InputSplit[splits.size()]);
}
@SuppressWarnings("unchecked")
public RecordReader<K, V> getRecordReader(InputSplit split, JobConf conf,
Reporter reporter) throws IOException {
// Find the InputFormat and then the RecordReader from the
// TaggedInputSplit.
TaggedInputSplit taggedInputSplit = (TaggedInputSplit) split;
InputFormat<K, V> inputFormat = (InputFormat<K, V>) ReflectionUtils
.newInstance(taggedInputSplit.getInputFormatClass(), conf);
return inputFormat.getRecordReader(taggedInputSplit.getInputSplit(), conf,
reporter);
}
}
| 5,057 | 37.318182 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/NLineInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;
import org.apache.hadoop.mapred.LineRecordReader;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
/**
* NLineInputFormat which splits N lines of input as one split.
*
* In many "pleasantly" parallel applications, each process/mapper
* processes the same input file (s), but with computations are
* controlled by different parameters.(Referred to as "parameter sweeps").
* One way to achieve this, is to specify a set of parameters
* (one set per line) as input in a control file
* (which is the input path to the map-reduce application,
* where as the input dataset is specified
* via a config variable in JobConf.).
*
 * The NLineInputFormat can be used in such applications: it splits
 * the input file such that, by default, one line is fed as
 * the value to one map task, and the key is the offset,
 * i.e. (k,v) is (LongWritable, Text).
 * The location hints will span the whole mapred cluster.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class NLineInputFormat extends FileInputFormat<LongWritable, Text>
implements JobConfigurable {
private int N = 1;
public RecordReader<LongWritable, Text> getRecordReader(
InputSplit genericSplit,
JobConf job,
Reporter reporter)
throws IOException {
reporter.setStatus(genericSplit.toString());
return new LineRecordReader(job, (FileSplit) genericSplit);
}
/**
 * Logically splits the set of input files for the job; every N lines
 * of the input form one split.
*
* @see org.apache.hadoop.mapred.FileInputFormat#getSplits(JobConf, int)
*/
public InputSplit[] getSplits(JobConf job, int numSplits)
throws IOException {
ArrayList<FileSplit> splits = new ArrayList<FileSplit>();
for (FileStatus status : listStatus(job)) {
for (org.apache.hadoop.mapreduce.lib.input.FileSplit split :
org.apache.hadoop.mapreduce.lib.input.
NLineInputFormat.getSplitsForFile(status, job, N)) {
splits.add(new FileSplit(split));
}
}
return splits.toArray(new FileSplit[splits.size()]);
}
public void configure(JobConf conf) {
N = conf.getInt("mapreduce.input.lineinputformat.linespermap", 1);
}
/**
* NLineInputFormat uses LineRecordReader, which always reads
* (and consumes) at least one character out of its upper split
* boundary. So to make sure that each mapper gets N lines, we
* move back the upper split limits of each split
* by one character here.
* @param fileName Path of file
* @param begin the position of the first byte in the file to process
* @param length number of bytes in InputSplit
* @return FileSplit
*/
protected static FileSplit createFileSplit(Path fileName, long begin, long length) {
return (begin == 0)
? new FileSplit(fileName, begin, length - 1, new String[] {})
: new FileSplit(fileName, begin - 1, length, new String[] {});
}
}
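/*
 * Illustrative driver sketch (not part of the Hadoop source) showing how a
 * job might feed, say, ten parameter lines of a control file to each map
 * task. The job name, paths and line count are placeholders; the property
 * name is the one read back in configure() above.
 */
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.NLineInputFormat;

public class NLineDriverSketch {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf(NLineDriverSketch.class);
    conf.setJobName("parameter-sweep");

    // Keys are byte offsets (LongWritable), values are the parameter lines
    // (Text); each split carries ten lines of the control file.
    conf.setInputFormat(NLineInputFormat.class);
    conf.setInt("mapreduce.input.lineinputformat.linespermap", 10);

    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));
    JobClient.runJob(conf);
  }
}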
| 4,487 | 39.071429 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/CombineTextInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
/**
* Input format that is a <code>CombineFileInputFormat</code>-equivalent for
* <code>TextInputFormat</code>.
*
* @see CombineFileInputFormat
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class CombineTextInputFormat
extends CombineFileInputFormat<LongWritable,Text> {
@SuppressWarnings({ "rawtypes", "unchecked" })
public RecordReader<LongWritable,Text> getRecordReader(InputSplit split,
JobConf conf, Reporter reporter) throws IOException {
return new CombineFileRecordReader(conf, (CombineFileSplit)split, reporter,
TextRecordReaderWrapper.class);
}
/**
* A record reader that may be passed to <code>CombineFileRecordReader</code>
* so that it can be used in a <code>CombineFileInputFormat</code>-equivalent
* for <code>TextInputFormat</code>.
*
* @see CombineFileRecordReader
* @see CombineFileInputFormat
* @see TextInputFormat
*/
private static class TextRecordReaderWrapper
extends CombineFileRecordReaderWrapper<LongWritable,Text> {
// this constructor signature is required by CombineFileRecordReader
public TextRecordReaderWrapper(CombineFileSplit split, Configuration conf,
Reporter reporter, Integer idx) throws IOException {
super(new TextInputFormat(), split, conf, reporter, idx);
}
}
}
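/*
 * Illustrative sketch (not part of the Hadoop source): plugging
 * CombineTextInputFormat into an old-API job and capping the size of the
 * combined splits. The 128 MB figure and the split-size property name are
 * assumptions of this sketch; adjust them to the cluster at hand.
 */
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.CombineTextInputFormat;

public class CombineTextSketch {
  public static JobConf configureInput(JobConf conf, Path inputDir) {
    // Many small text files are packed into a few large splits.
    conf.setInputFormat(CombineTextInputFormat.class);
    // Assumed property name for the maximum combined split size.
    conf.setLong("mapreduce.input.fileinputformat.split.maxsize",
        128L * 1024 * 1024);
    FileInputFormat.setInputPaths(conf, inputDir);
    return conf;
  }
}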
| 2,646 | 37.362319 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/RegexMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
/**
* A {@link Mapper} that extracts text matching a regular expression.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class RegexMapper<K> extends MapReduceBase
implements Mapper<K, Text, Text, LongWritable> {
private Pattern pattern;
private int group;
public void configure(JobConf job) {
pattern = Pattern.compile(job.get(org.apache.hadoop.mapreduce.lib.map.
RegexMapper.PATTERN));
group = job.getInt(org.apache.hadoop.mapreduce.lib.map.
RegexMapper.GROUP, 0);
}
public void map(K key, Text value,
OutputCollector<Text, LongWritable> output,
Reporter reporter)
throws IOException {
String text = value.toString();
Matcher matcher = pattern.matcher(text);
while (matcher.find()) {
output.collect(new Text(matcher.group(group)), new LongWritable(1));
}
}
}
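/*
 * Illustrative driver sketch (not part of the Hadoop source): configuring
 * RegexMapper to emit the first capture group of a pattern and counting the
 * matches with LongSumReducer. The paths and the pattern are placeholders;
 * the PATTERN/GROUP constants are the ones read in configure() above.
 */
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.LongSumReducer;
import org.apache.hadoop.mapred.lib.RegexMapper;

public class RegexCountSketch {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf(RegexCountSketch.class);
    conf.setJobName("regex-count");

    // Pattern and capture group are read back in RegexMapper.configure().
    conf.set(org.apache.hadoop.mapreduce.lib.map.RegexMapper.PATTERN,
        "ERROR\\s+(\\w+)");
    conf.setInt(org.apache.hadoop.mapreduce.lib.map.RegexMapper.GROUP, 1);

    conf.setMapperClass(RegexMapper.class);
    conf.setCombinerClass(LongSumReducer.class);
    conf.setReducerClass(LongSumReducer.class);
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(LongWritable.class);

    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));
    JobClient.runJob(conf);
  }
}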
| 2,255 | 33.181818 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/IdentityMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.MapReduceBase;
/**
* Implements the identity function, mapping inputs directly to outputs.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class IdentityMapper<K, V>
extends MapReduceBase implements Mapper<K, V, K, V> {
/** The identify function. Input key/value pair is written directly to
* output.*/
public void map(K key, V val,
OutputCollector<K, V> output, Reporter reporter)
throws IOException {
output.collect(key, val);
}
}
| 1,652 | 34.934783 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/InverseMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
/**
* A {@link Mapper} that swaps keys and values.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class InverseMapper<K, V>
extends MapReduceBase implements Mapper<K, V, V, K> {
/** The inverse function. Input keys and values are swapped.*/
public void map(K key, V value,
OutputCollector<V, K> output, Reporter reporter)
throws IOException {
output.collect(value, key);
}
}
| 1,610 | 34.021739 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleInputs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.util.ReflectionUtils;
/**
* This class supports MapReduce jobs that have multiple input paths with
* a different {@link InputFormat} and {@link Mapper} for each path
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MultipleInputs {
/**
* Add a {@link Path} with a custom {@link InputFormat} to the list of
* inputs for the map-reduce job.
*
* @param conf The configuration of the job
* @param path {@link Path} to be added to the list of inputs for the job
* @param inputFormatClass {@link InputFormat} class to use for this path
*/
public static void addInputPath(JobConf conf, Path path,
Class<? extends InputFormat> inputFormatClass) {
String inputFormatMapping = path.toString() + ";"
+ inputFormatClass.getName();
String inputFormats = conf.get("mapreduce.input.multipleinputs.dir.formats");
conf.set("mapreduce.input.multipleinputs.dir.formats",
inputFormats == null ? inputFormatMapping : inputFormats + ","
+ inputFormatMapping);
conf.setInputFormat(DelegatingInputFormat.class);
}
/**
* Add a {@link Path} with a custom {@link InputFormat} and
* {@link Mapper} to the list of inputs for the map-reduce job.
*
* @param conf The configuration of the job
* @param path {@link Path} to be added to the list of inputs for the job
* @param inputFormatClass {@link InputFormat} class to use for this path
* @param mapperClass {@link Mapper} class to use for this path
*/
public static void addInputPath(JobConf conf, Path path,
Class<? extends InputFormat> inputFormatClass,
Class<? extends Mapper> mapperClass) {
addInputPath(conf, path, inputFormatClass);
String mapperMapping = path.toString() + ";" + mapperClass.getName();
String mappers = conf.get("mapreduce.input.multipleinputs.dir.mappers");
conf.set("mapreduce.input.multipleinputs.dir.mappers", mappers == null ? mapperMapping
: mappers + "," + mapperMapping);
conf.setMapperClass(DelegatingMapper.class);
}
/**
* Retrieves a map of {@link Path}s to the {@link InputFormat} class
* that should be used for them.
*
   * @param conf The configuration of the job
* @see #addInputPath(JobConf, Path, Class)
* @return A map of paths to inputformats for the job
*/
static Map<Path, InputFormat> getInputFormatMap(JobConf conf) {
Map<Path, InputFormat> m = new HashMap<Path, InputFormat>();
String[] pathMappings = conf.get("mapreduce.input.multipleinputs.dir.formats").split(",");
for (String pathMapping : pathMappings) {
String[] split = pathMapping.split(";");
InputFormat inputFormat;
try {
inputFormat = (InputFormat) ReflectionUtils.newInstance(conf
.getClassByName(split[1]), conf);
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
m.put(new Path(split[0]), inputFormat);
}
return m;
}
/**
* Retrieves a map of {@link Path}s to the {@link Mapper} class that
* should be used for them.
*
   * @param conf The configuration of the job
* @see #addInputPath(JobConf, Path, Class, Class)
* @return A map of paths to mappers for the job
*/
@SuppressWarnings("unchecked")
static Map<Path, Class<? extends Mapper>> getMapperTypeMap(JobConf conf) {
if (conf.get("mapreduce.input.multipleinputs.dir.mappers") == null) {
return Collections.emptyMap();
}
Map<Path, Class<? extends Mapper>> m = new HashMap<Path, Class<? extends Mapper>>();
String[] pathMappings = conf.get("mapreduce.input.multipleinputs.dir.mappers").split(",");
for (String pathMapping : pathMappings) {
String[] split = pathMapping.split(";");
Class<? extends Mapper> mapClass;
try {
mapClass = (Class<? extends Mapper>) conf.getClassByName(split[1]);
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
m.put(new Path(split[0]), mapClass);
}
return m;
}
}
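/*
 * Illustrative sketch (not part of the Hadoop source): wiring two input
 * directories, each with its own InputFormat and Mapper, through
 * MultipleInputs. The paths are placeholders, and RegexMapper would still
 * need its pattern/group configuration; both mappers emit <Text, LongWritable>,
 * so a single LongSumReducer can consume their output.
 */
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.KeyValueTextInputFormat;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.lib.LongSumReducer;
import org.apache.hadoop.mapred.lib.MultipleInputs;
import org.apache.hadoop.mapred.lib.RegexMapper;
import org.apache.hadoop.mapred.lib.TokenCountMapper;

public class MultipleInputsSketch {
  public static void wire(JobConf conf) {
    // Token counts from plain text files under /data/text ...
    MultipleInputs.addInputPath(conf, new Path("/data/text"),
        TextInputFormat.class, TokenCountMapper.class);
    // ... and regex matches from key/value formatted files under /data/kv.
    MultipleInputs.addInputPath(conf, new Path("/data/kv"),
        KeyValueTextInputFormat.class, RegexMapper.class);

    // addInputPath has already installed DelegatingInputFormat and
    // DelegatingMapper on the JobConf, so only the reduce side is set here.
    conf.setReducerClass(LongSumReducer.class);
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(LongWritable.class);
  }
}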
| 5,238 | 37.522059 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TaggedInputSplit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringInterner;
/**
* An {@link InputSplit} that tags another InputSplit with extra data for use
* by {@link DelegatingInputFormat}s and {@link DelegatingMapper}s.
*/
class TaggedInputSplit implements Configurable, InputSplit {
private Class<? extends InputSplit> inputSplitClass;
private InputSplit inputSplit;
private Class<? extends InputFormat> inputFormatClass;
private Class<? extends Mapper> mapperClass;
private Configuration conf;
public TaggedInputSplit() {
// Default constructor.
}
/**
* Creates a new TaggedInputSplit.
*
* @param inputSplit The InputSplit to be tagged
* @param conf The configuration to use
* @param inputFormatClass The InputFormat class to use for this job
* @param mapperClass The Mapper class to use for this job
*/
public TaggedInputSplit(InputSplit inputSplit, Configuration conf,
Class<? extends InputFormat> inputFormatClass,
Class<? extends Mapper> mapperClass) {
this.inputSplitClass = inputSplit.getClass();
this.inputSplit = inputSplit;
this.conf = conf;
this.inputFormatClass = inputFormatClass;
this.mapperClass = mapperClass;
}
/**
* Retrieves the original InputSplit.
*
* @return The InputSplit that was tagged
*/
public InputSplit getInputSplit() {
return inputSplit;
}
/**
* Retrieves the InputFormat class to use for this split.
*
* @return The InputFormat class to use
*/
public Class<? extends InputFormat> getInputFormatClass() {
return inputFormatClass;
}
/**
* Retrieves the Mapper class to use for this split.
*
* @return The Mapper class to use
*/
public Class<? extends Mapper> getMapperClass() {
return mapperClass;
}
public long getLength() throws IOException {
return inputSplit.getLength();
}
public String[] getLocations() throws IOException {
return inputSplit.getLocations();
}
@SuppressWarnings("unchecked")
public void readFields(DataInput in) throws IOException {
inputSplitClass = (Class<? extends InputSplit>) readClass(in);
inputSplit = (InputSplit) ReflectionUtils
.newInstance(inputSplitClass, conf);
inputSplit.readFields(in);
inputFormatClass = (Class<? extends InputFormat>) readClass(in);
mapperClass = (Class<? extends Mapper>) readClass(in);
}
private Class<?> readClass(DataInput in) throws IOException {
String className = StringInterner.weakIntern(Text.readString(in));
try {
return conf.getClassByName(className);
} catch (ClassNotFoundException e) {
throw new RuntimeException("readObject can't find class", e);
}
}
public void write(DataOutput out) throws IOException {
Text.writeString(out, inputSplitClass.getName());
inputSplit.write(out);
Text.writeString(out, inputFormatClass.getName());
Text.writeString(out, mapperClass.getName());
}
public Configuration getConf() {
return conf;
}
public void setConf(Configuration conf) {
this.conf = conf;
}
@Override
public String toString() {
return inputSplit.toString();
}
}
| 4,362 | 28.680272 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/DelegatingMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.ReflectionUtils;
/**
 * A {@link Mapper} that delegates behaviour of paths to multiple other
* mappers.
*
* @see MultipleInputs#addInputPath(JobConf, Path, Class, Class)
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class DelegatingMapper<K1, V1, K2, V2> implements Mapper<K1, V1, K2, V2> {
private JobConf conf;
private Mapper<K1, V1, K2, V2> mapper;
@SuppressWarnings("unchecked")
public void map(K1 key, V1 value, OutputCollector<K2, V2> outputCollector,
Reporter reporter) throws IOException {
if (mapper == null) {
// Find the Mapper from the TaggedInputSplit.
TaggedInputSplit inputSplit = (TaggedInputSplit) reporter.getInputSplit();
mapper = (Mapper<K1, V1, K2, V2>) ReflectionUtils.newInstance(inputSplit
.getMapperClass(), conf);
}
mapper.map(key, value, outputCollector, reporter);
}
public void configure(JobConf conf) {
this.conf = conf;
}
public void close() throws IOException {
if (mapper != null) {
mapper.close();
}
}
}
| 2,284 | 31.642857 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/FilterOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputFormat;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Progressable;
/**
* FilterOutputFormat is a convenience class that wraps OutputFormat.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FilterOutputFormat<K, V> implements OutputFormat<K, V> {
protected OutputFormat<K,V> baseOut;
public FilterOutputFormat () {
this.baseOut = null;
}
/**
* Create a FilterOutputFormat based on the supplied output format.
* @param out the underlying OutputFormat
*/
public FilterOutputFormat (OutputFormat<K,V> out) {
this.baseOut = out;
}
public RecordWriter<K, V> getRecordWriter(FileSystem ignored, JobConf job,
String name, Progressable progress) throws IOException {
return getBaseOut().getRecordWriter(ignored, job, name, progress);
}
public void checkOutputSpecs(FileSystem ignored, JobConf job)
throws IOException {
getBaseOut().checkOutputSpecs(ignored, job);
}
private OutputFormat<K,V> getBaseOut() throws IOException {
if (baseOut == null) {
throw new IOException("Outputformat not set for FilterOutputFormat");
}
return baseOut;
}
/**
* <code>FilterRecordWriter</code> is a convenience wrapper
* class that implements {@link RecordWriter}.
*/
public static class FilterRecordWriter<K,V> implements RecordWriter<K,V> {
protected RecordWriter<K,V> rawWriter = null;
public FilterRecordWriter() throws IOException {
rawWriter = null;
}
public FilterRecordWriter(RecordWriter<K,V> rawWriter) throws IOException {
this.rawWriter = rawWriter;
}
public void close(Reporter reporter) throws IOException {
getRawWriter().close(reporter);
}
public void write(K key, V value) throws IOException {
getRawWriter().write(key, value);
}
private RecordWriter<K,V> getRawWriter() throws IOException {
if (rawWriter == null) {
throw new IOException ("Record Writer not set for FilterRecordWriter");
}
return rawWriter;
}
}
}
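/*
 * Illustrative sketch (not part of the Hadoop source): a FilterOutputFormat
 * subclass that wraps TextOutputFormat and silently drops records with null
 * values. The class name and the filtering rule are assumptions of the
 * sketch.
 */
import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.mapred.lib.FilterOutputFormat;
import org.apache.hadoop.util.Progressable;

public class NonNullTextOutputFormat extends FilterOutputFormat<Text, Text> {

  public NonNullTextOutputFormat() {
    // Delegate the actual writing to a plain TextOutputFormat.
    super(new TextOutputFormat<Text, Text>());
  }

  @Override
  public RecordWriter<Text, Text> getRecordWriter(FileSystem ignored,
      JobConf job, String name, Progressable progress) throws IOException {
    final RecordWriter<Text, Text> raw =
        super.getRecordWriter(ignored, job, name, progress);
    // Wrap the underlying writer and filter at write() time.
    return new FilterRecordWriter<Text, Text>(raw) {
      @Override
      public void write(Text key, Text value) throws IOException {
        if (value != null) {
          raw.write(key, value);
        }
      }
    };
  }
}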
| 3,226 | 30.028846 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Partitioner;
/**
* Partitioner effecting a total order by reading split points from
* an externally generated source.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TotalOrderPartitioner<K extends WritableComparable<?>,V>
extends org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner<K, V>
implements Partitioner<K,V> {
public TotalOrderPartitioner() { }
public void configure(JobConf job) {
super.setConf(job);
}
/**
* Set the path to the SequenceFile storing the sorted partition keyset.
* It must be the case that for <tt>R</tt> reduces, there are <tt>R-1</tt>
* keys in the SequenceFile.
* @deprecated Use
* {@link #setPartitionFile(Configuration, Path)}
* instead
*/
@Deprecated
public static void setPartitionFile(JobConf job, Path p) {
org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner.
setPartitionFile(job, p);
}
/**
* Get the path to the SequenceFile storing the sorted partition keyset.
* @see #setPartitionFile(JobConf,Path)
* @deprecated Use
* {@link #getPartitionFile(Configuration)}
* instead
*/
@Deprecated
public static String getPartitionFile(JobConf job) {
return org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner.
getPartitionFile(job);
}
}
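/*
 * Illustrative sketch (not part of the Hadoop source): the usual companion of
 * this partitioner is InputSampler, which samples the input and writes the
 * R-1 split keys to a SequenceFile. The paths, sampling parameters and the
 * use of org.apache.hadoop.mapred.lib.InputSampler here are assumptions of
 * the sketch; in some setups the partition file must also be shipped to the
 * tasks (e.g. via the distributed cache).
 */
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.KeyValueTextInputFormat;
import org.apache.hadoop.mapred.lib.InputSampler;
import org.apache.hadoop.mapred.lib.TotalOrderPartitioner;

public class TotalOrderSketch {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf(TotalOrderSketch.class);
    conf.setJobName("total-order-sort");
    conf.setInputFormat(KeyValueTextInputFormat.class);
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(Text.class);
    conf.setNumReduceTasks(4);                 // R reduces => R-1 split keys
    conf.setPartitionerClass(TotalOrderPartitioner.class);

    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));

    // Sample the input to pick the R-1 split points, then point the
    // partitioner at the resulting SequenceFile.
    Path partitionFile = new Path(args[1] + "_partitions.lst");
    TotalOrderPartitioner.setPartitionFile(conf, partitionFile);
    InputSampler.writePartitionFile(conf,
        new InputSampler.RandomSampler<Text, Text>(0.1, 1000));

    JobClient.runJob(conf);
  }
}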
| 2,458 | 33.152778 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/CombineFileSplit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class CombineFileSplit extends
org.apache.hadoop.mapreduce.lib.input.CombineFileSplit
implements InputSplit {
private JobConf job;
public CombineFileSplit() {
}
public CombineFileSplit(JobConf job, Path[] files, long[] start,
long[] lengths, String[] locations) {
super(files, start, lengths, locations);
this.job = job;
}
public CombineFileSplit(JobConf job, Path[] files, long[] lengths) {
super(files, lengths);
this.job = job;
}
/**
* Copy constructor
*/
public CombineFileSplit(CombineFileSplit old) throws IOException {
super(old);
}
public JobConf getJob() {
return job;
}
}
| 1,842 | 28.725806 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleSequenceFileOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.hadoop.util.Progressable;
/**
 * This class extends the MultipleOutputFormat, allowing the output data to be
 * written to different output files in SequenceFile output format.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MultipleSequenceFileOutputFormat <K,V>
extends MultipleOutputFormat<K, V> {
private SequenceFileOutputFormat<K,V> theSequenceFileOutputFormat = null;
@Override
protected RecordWriter<K, V> getBaseRecordWriter(FileSystem fs,
JobConf job,
String name,
Progressable arg3)
throws IOException {
if (theSequenceFileOutputFormat == null) {
theSequenceFileOutputFormat = new SequenceFileOutputFormat<K,V>();
}
return theSequenceFileOutputFormat.getRecordWriter(fs, job, name, arg3);
}
}
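/*
 * Illustrative sketch (not part of the Hadoop source): this class is
 * typically subclassed so that the file-name hook inherited from
 * MultipleOutputFormat routes records to different files. The class name and
 * the naming scheme below are assumptions.
 */
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat;

public class PerCategorySequenceFileOutputFormat
    extends MultipleSequenceFileOutputFormat<Text, Text> {

  @Override
  protected String generateFileNameForKeyValue(Text key, Text value,
      String name) {
    // Prefix the default part file name with the record's key, e.g.
    // "sports-part-00000", so every category gets its own sequence file.
    return key.toString() + "-" + name;
  }
}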
| 2,111 | 38.111111 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/CombineFileInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.SplittableCompressionCodec;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
* An abstract {@link org.apache.hadoop.mapred.InputFormat} that returns {@link CombineFileSplit}'s
* in {@link org.apache.hadoop.mapred.InputFormat#getSplits(JobConf, int)} method.
* Splits are constructed from the files under the input paths.
* A split cannot have files from different pools.
* Each split returned may contain blocks from different files.
* If a maxSplitSize is specified, then blocks on the same node are
* combined to form a single split. Blocks that are left over are
* then combined with other blocks in the same rack.
* If maxSplitSize is not specified, then blocks from the same rack
* are combined in a single split; no attempt is made to create
* node-local splits.
* If the maxSplitSize is equal to the block size, then this class
 * is similar to the default splitting behaviour in Hadoop: each
* block is a locally processed split.
* Subclasses implement {@link org.apache.hadoop.mapred.InputFormat#getRecordReader(InputSplit, JobConf, Reporter)}
* to construct <code>RecordReader</code>'s for <code>CombineFileSplit</code>'s.
* @see CombineFileSplit
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class CombineFileInputFormat<K, V>
extends org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat<K, V>
implements InputFormat<K, V>{
/**
* default constructor
*/
public CombineFileInputFormat() {
}
public InputSplit[] getSplits(JobConf job, int numSplits)
throws IOException {
List<org.apache.hadoop.mapreduce.InputSplit> newStyleSplits =
super.getSplits(Job.getInstance(job));
InputSplit[] ret = new InputSplit[newStyleSplits.size()];
for(int pos = 0; pos < newStyleSplits.size(); ++pos) {
org.apache.hadoop.mapreduce.lib.input.CombineFileSplit newStyleSplit =
(org.apache.hadoop.mapreduce.lib.input.CombineFileSplit) newStyleSplits.get(pos);
ret[pos] = new CombineFileSplit(job, newStyleSplit.getPaths(),
newStyleSplit.getStartOffsets(), newStyleSplit.getLengths(),
newStyleSplit.getLocations());
}
return ret;
}
/**
* Create a new pool and add the filters to it.
* A split cannot have files from different pools.
* @deprecated Use {@link #createPool(List)}.
*/
@Deprecated
protected void createPool(JobConf conf, List<PathFilter> filters) {
createPool(filters);
}
/**
* Create a new pool and add the filters to it.
* A pathname can satisfy any one of the specified filters.
* A split cannot have files from different pools.
* @deprecated Use {@link #createPool(PathFilter...)}.
*/
@Deprecated
protected void createPool(JobConf conf, PathFilter... filters) {
createPool(filters);
}
/**
* This is not implemented yet.
*/
public abstract RecordReader<K, V> getRecordReader(InputSplit split,
JobConf job, Reporter reporter)
throws IOException;
// abstract method from super class implemented to return null
public org.apache.hadoop.mapreduce.RecordReader<K, V> createRecordReader(
org.apache.hadoop.mapreduce.InputSplit split,
TaskAttemptContext context) throws IOException {
return null;
}
/** List input directories.
* Subclasses may override to, e.g., select only files matching a regular
* expression.
*
* @param job the job to list input paths for
* @return array of FileStatus objects
* @throws IOException if zero items.
*/
protected FileStatus[] listStatus(JobConf job) throws IOException {
List<FileStatus> result = super.listStatus(Job.getInstance(job));
return result.toArray(new FileStatus[result.size()]);
}
/**
* Subclasses should avoid overriding this method and should instead only
* override {@link #isSplitable(FileSystem, Path)}. The implementation of
* this method simply calls the other method to preserve compatibility.
* @see <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5530">
* MAPREDUCE-5530</a>
*
* @param context the job context
* @param file the file name to check
* @return is this file splitable?
*/
@InterfaceAudience.Private
@Override
protected boolean isSplitable(JobContext context, Path file) {
try {
return isSplitable(FileSystem.get(context.getConfiguration()), file);
}
catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
protected boolean isSplitable(FileSystem fs, Path file) {
final CompressionCodec codec =
new CompressionCodecFactory(fs.getConf()).getCodec(file);
if (null == codec) {
return true;
}
return codec instanceof SplittableCompressionCodec;
}
}
| 6,386 | 37.245509 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/KeyFieldBasedPartitioner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Partitioner;
/**
* Defines a way to partition keys based on certain key fields (also see
 * {@link KeyFieldBasedComparator}).
* The key specification supported is of the form -k pos1[,pos2], where,
* pos is of the form f[.c][opts], where f is the number
* of the key field to use, and c is the number of the first character from
 * the beginning of the field. Fields and character positions are numbered
* starting with 1; a character position of zero in pos2 indicates the
* field's last character. If '.c' is omitted from pos1, it defaults to 1
* (the beginning of the field); if omitted from pos2, it defaults to 0
* (the end of the field).
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class KeyFieldBasedPartitioner<K2, V2> extends
org.apache.hadoop.mapreduce.lib.partition.KeyFieldBasedPartitioner<K2, V2>
implements Partitioner<K2, V2> {
public void configure(JobConf job) {
super.setConf(job);
}
}
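/*
 * Illustrative sketch (not part of the Hadoop source): partitioning on the
 * second tab-separated key field only, so that all records sharing that
 * field go to the same reducer. The key spec follows the -k form described
 * above; the class and method names of the sketch itself are assumptions.
 */
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner;

public class KeyFieldPartitionSketch {
  public static void configurePartitioning(JobConf conf) {
    conf.setMapOutputKeyClass(Text.class);
    conf.setPartitionerClass(KeyFieldBasedPartitioner.class);
    // "-k2,2": partition on key field 2 only, from its first to its last
    // character (field numbering starts at 1, as described above).
    conf.setKeyFieldPartitionerOptions("-k2,2");
  }
}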
| 2,009 | 40.875 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/HashPartitioner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.Partitioner;
import org.apache.hadoop.mapred.JobConf;
/**
* Partition keys by their {@link Object#hashCode()}.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class HashPartitioner<K2, V2> implements Partitioner<K2, V2> {
public void configure(JobConf job) {}
/** Use {@link Object#hashCode()} to partition. */
public int getPartition(K2 key, V2 value,
int numReduceTasks) {
return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
}
}
| 1,500 | 34.738095 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TokenCountMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
/**
 * A {@link Mapper} that maps text values into &lt;token, freq&gt; pairs. Uses
* {@link StringTokenizer} to break text into tokens.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TokenCountMapper<K> extends MapReduceBase
implements Mapper<K, Text, Text, LongWritable> {
public void map(K key, Text value,
OutputCollector<Text, LongWritable> output,
Reporter reporter)
throws IOException {
// get input text
String text = value.toString(); // value is line of text
// tokenize the value
StringTokenizer st = new StringTokenizer(text);
while (st.hasMoreTokens()) {
// output <token,1> pairs
output.collect(new Text(st.nextToken()), new LongWritable(1));
}
}
}
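A driver sketch, assuming the standard library LongSumReducer and paths taken from the command line, that pairs this mapper with a summing reducer to produce a word count.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.LongSumReducer;
import org.apache.hadoop.mapred.lib.TokenCountMapper;

public class TokenCountDriver {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(TokenCountDriver.class);
    job.setJobName("token-count");
    job.setMapperClass(TokenCountMapper.class);
    job.setCombinerClass(LongSumReducer.class);   // summing is associative, so a combiner is safe
    job.setReducerClass(LongSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(LongWritable.class);
    FileInputFormat.setInputPaths(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    JobClient.runJob(job);
  }
}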
| 2,070 | 34.101695 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/db/DBInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.db;
import java.io.IOException;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.Job;
@InterfaceAudience.Public
@InterfaceStability.Stable
@SuppressWarnings("deprecation")
public class DBInputFormat<T extends DBWritable>
extends org.apache.hadoop.mapreduce.lib.db.DBInputFormat<T>
implements InputFormat<LongWritable, T>, JobConfigurable {
/**
* A RecordReader that reads records from a SQL table.
* Emits LongWritables containing the record number as
* key and DBWritables as value.
*/
protected class DBRecordReader extends
org.apache.hadoop.mapreduce.lib.db.DBRecordReader<T>
implements RecordReader<LongWritable, T> {
/**
     * This constructor is kept for compatibility with M/R 1.x.
*
* @param split The InputSplit to read data for
* @throws SQLException
*/
protected DBRecordReader(DBInputSplit split, Class<T> inputClass,
JobConf job) throws SQLException {
super(split, inputClass, job, connection, dbConf, conditions, fieldNames, tableName);
}
/**
* @param split The InputSplit to read data for
* @throws SQLException
*/
protected DBRecordReader(DBInputSplit split, Class<T> inputClass,
JobConf job, Connection conn, DBConfiguration dbConfig, String cond,
String [] fields, String table) throws SQLException {
super(split, inputClass, job, conn, dbConfig, cond, fields, table);
}
/** {@inheritDoc} */
public LongWritable createKey() {
return new LongWritable();
}
/** {@inheritDoc} */
public T createValue() {
return super.createValue();
}
public long getPos() throws IOException {
return super.getPos();
}
/** {@inheritDoc} */
public boolean next(LongWritable key, T value) throws IOException {
return super.next(key, value);
}
}
/**
* A RecordReader implementation that just passes through to a wrapped
* RecordReader built with the new API.
*/
private static class DBRecordReaderWrapper<T extends DBWritable>
implements RecordReader<LongWritable, T> {
private org.apache.hadoop.mapreduce.lib.db.DBRecordReader<T> rr;
public DBRecordReaderWrapper(
org.apache.hadoop.mapreduce.lib.db.DBRecordReader<T> inner) {
this.rr = inner;
}
public void close() throws IOException {
rr.close();
}
public LongWritable createKey() {
return new LongWritable();
}
public T createValue() {
return rr.createValue();
}
public float getProgress() throws IOException {
return rr.getProgress();
}
public long getPos() throws IOException {
return rr.getPos();
}
public boolean next(LongWritable key, T value) throws IOException {
return rr.next(key, value);
}
}
/**
* A Class that does nothing, implementing DBWritable
*/
public static class NullDBWritable extends
org.apache.hadoop.mapreduce.lib.db.DBInputFormat.NullDBWritable
implements DBWritable, Writable {
}
/**
* A InputSplit that spans a set of rows
*/
protected static class DBInputSplit extends
org.apache.hadoop.mapreduce.lib.db.DBInputFormat.DBInputSplit
implements InputSplit {
/**
* Default Constructor
*/
public DBInputSplit() {
}
/**
* Convenience Constructor
* @param start the index of the first row to select
* @param end the index of the last row to select
*/
public DBInputSplit(long start, long end) {
super(start, end);
}
}
/** {@inheritDoc} */
public void configure(JobConf job) {
super.setConf(job);
}
/** {@inheritDoc} */
public RecordReader<LongWritable, T> getRecordReader(InputSplit split,
JobConf job, Reporter reporter) throws IOException {
// wrap the DBRR in a shim class to deal with API differences.
return new DBRecordReaderWrapper<T>(
(org.apache.hadoop.mapreduce.lib.db.DBRecordReader<T>)
createDBRecordReader(
(org.apache.hadoop.mapreduce.lib.db.DBInputFormat.DBInputSplit) split, job));
}
/** {@inheritDoc} */
public InputSplit[] getSplits(JobConf job, int chunks) throws IOException {
List<org.apache.hadoop.mapreduce.InputSplit> newSplits =
super.getSplits(Job.getInstance(job));
InputSplit[] ret = new InputSplit[newSplits.size()];
int i = 0;
for (org.apache.hadoop.mapreduce.InputSplit s : newSplits) {
org.apache.hadoop.mapreduce.lib.db.DBInputFormat.DBInputSplit split =
(org.apache.hadoop.mapreduce.lib.db.DBInputFormat.DBInputSplit)s;
ret[i++] = new DBInputSplit(split.getStart(), split.getEnd());
}
return ret;
}
/**
* Initializes the map-part of the job with the appropriate input settings.
*
* @param job The job
* @param inputClass the class object implementing DBWritable, which is the
* Java object holding tuple fields.
* @param tableName The table to read data from
   * @param conditions The condition with which to select data, e.g.
   * '(updated > 20070101 AND length > 0)'
* @param orderBy the fieldNames in the orderBy clause.
* @param fieldNames The field names in the table
* @see #setInput(JobConf, Class, String, String)
*/
public static void setInput(JobConf job, Class<? extends DBWritable> inputClass,
String tableName,String conditions, String orderBy, String... fieldNames) {
job.setInputFormat(DBInputFormat.class);
DBConfiguration dbConf = new DBConfiguration(job);
dbConf.setInputClass(inputClass);
dbConf.setInputTableName(tableName);
dbConf.setInputFieldNames(fieldNames);
dbConf.setInputConditions(conditions);
dbConf.setInputOrderBy(orderBy);
}
/**
* Initializes the map-part of the job with the appropriate input settings.
*
* @param job The job
* @param inputClass the class object implementing DBWritable, which is the
* Java object holding tuple fields.
* @param inputQuery the input query to select fields. Example :
* "SELECT f1, f2, f3 FROM Mytable ORDER BY f1"
* @param inputCountQuery the input query that returns the number of records in
* the table.
* Example : "SELECT COUNT(f1) FROM Mytable"
* @see #setInput(JobConf, Class, String, String, String, String...)
*/
public static void setInput(JobConf job, Class<? extends DBWritable> inputClass,
String inputQuery, String inputCountQuery) {
job.setInputFormat(DBInputFormat.class);
DBConfiguration dbConf = new DBConfiguration(job);
dbConf.setInputClass(inputClass);
dbConf.setInputQuery(inputQuery);
dbConf.setInputCountQuery(inputCountQuery);
}
}
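A hedged driver sketch of the configureDB/setInput pairing described above; the JDBC driver, URL, credentials, table, and the OrderRecord row holder are all invented for illustration.

import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.db.DBConfiguration;
import org.apache.hadoop.mapred.lib.db.DBInputFormat;
import org.apache.hadoop.mapred.lib.db.DBWritable;

public class DBImportDriver {

  /** Hypothetical row holder for a two-column "orders" table. */
  public static class OrderRecord implements DBWritable {
    long orderId;
    double amount;

    public void readFields(ResultSet rs) throws SQLException {
      orderId = rs.getLong("order_id");
      amount = rs.getDouble("amount");
    }

    public void write(PreparedStatement ps) throws SQLException {
      ps.setLong(1, orderId);
      ps.setDouble(2, amount);
    }
  }

  public static JobConf configure(JobConf job) {
    // Register the JDBC driver and connection settings first...
    DBConfiguration.configureDB(job, "com.mysql.jdbc.Driver",
        "jdbc:mysql://dbhost/sales", "reader", "secret");
    // ...then describe what to read: table, WHERE clause, ORDER BY column, fields.
    // setInput also sets DBInputFormat as the job's input format.
    DBInputFormat.setInput(job, OrderRecord.class, "orders",
        "status = 'SHIPPED'", "order_id", "order_id", "amount");
    job.setMapOutputKeyClass(LongWritable.class);
    return job;
  }
}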
| 8,062 | 32.595833 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/db/DBWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.db;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface DBWritable
extends org.apache.hadoop.mapreduce.lib.db.DBWritable {
}
| 1,115 | 37.482759 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/db/DBOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.db;
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputFormat;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.hadoop.util.Progressable;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class DBOutputFormat<K extends DBWritable, V>
extends org.apache.hadoop.mapreduce.lib.db.DBOutputFormat<K, V>
implements OutputFormat<K, V> {
/**
* A RecordWriter that writes the reduce output to a SQL table
*/
protected class DBRecordWriter extends
org.apache.hadoop.mapreduce.lib.db.DBOutputFormat<K, V>.DBRecordWriter
implements RecordWriter<K, V> {
protected DBRecordWriter(Connection connection,
PreparedStatement statement) throws SQLException {
super(connection, statement);
}
/** {@inheritDoc} */
public void close(Reporter reporter) throws IOException {
super.close(null);
}
}
/** {@inheritDoc} */
public void checkOutputSpecs(FileSystem filesystem, JobConf job)
throws IOException {
}
/** {@inheritDoc} */
public RecordWriter<K, V> getRecordWriter(FileSystem filesystem,
JobConf job, String name, Progressable progress) throws IOException {
org.apache.hadoop.mapreduce.RecordWriter<K, V> w = super.getRecordWriter(
new TaskAttemptContextImpl(job,
TaskAttemptID.forName(job.get(MRJobConfig.TASK_ATTEMPT_ID))));
org.apache.hadoop.mapreduce.lib.db.DBOutputFormat.DBRecordWriter writer =
(org.apache.hadoop.mapreduce.lib.db.DBOutputFormat.DBRecordWriter) w;
try {
return new DBRecordWriter(writer.getConnection(), writer.getStatement());
} catch(SQLException se) {
throw new IOException(se);
}
}
/**
* Initializes the reduce-part of the job with the appropriate output settings
*
* @param job The job
* @param tableName The table to insert data into
* @param fieldNames The field names in the table.
*/
public static void setOutput(JobConf job, String tableName, String... fieldNames) {
if(fieldNames.length > 0 && fieldNames[0] != null) {
DBConfiguration dbConf = setOutput(job, tableName);
dbConf.setOutputFieldNames(fieldNames);
} else {
if(fieldNames.length > 0)
setOutput(job, tableName, fieldNames.length);
else
throw new IllegalArgumentException("Field names must be greater than 0");
}
}
/**
* Initializes the reduce-part of the job with the appropriate output settings
*
* @param job The job
* @param tableName The table to insert data into
* @param fieldCount the number of fields in the table.
*/
public static void setOutput(JobConf job, String tableName, int fieldCount) {
DBConfiguration dbConf = setOutput(job, tableName);
dbConf.setOutputFieldCount(fieldCount);
}
private static DBConfiguration setOutput(JobConf job, String tableName) {
job.setOutputFormat(DBOutputFormat.class);
job.setReduceSpeculativeExecution(false);
DBConfiguration dbConf = new DBConfiguration(job);
dbConf.setOutputTableName(tableName);
return dbConf;
}
}
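A hedged sketch of the setOutput call described above; the JDBC settings and table/column names are invented.

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.db.DBConfiguration;
import org.apache.hadoop.mapred.lib.db.DBOutputFormat;

public class DBExportDriver {
  public static JobConf configure(JobConf job) {
    DBConfiguration.configureDB(job, "org.postgresql.Driver",
        "jdbc:postgresql://dbhost/warehouse", "writer", "secret");
    // Three named columns in the target table; setOutput also sets the output
    // format class and disables speculative reduce execution.
    DBOutputFormat.setOutput(job, "daily_totals", "day", "token", "total");
    // The reduce output key class must implement DBWritable; its
    // write(PreparedStatement) fills the generated INSERT statement.
    return job;
  }
}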
| 4,434 | 34.48 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/db/DBConfiguration.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.db;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.JobConf;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class DBConfiguration extends
org.apache.hadoop.mapreduce.lib.db.DBConfiguration {
/** The JDBC Driver class name */
public static final String DRIVER_CLASS_PROPERTY =
org.apache.hadoop.mapreduce.lib.db.DBConfiguration.DRIVER_CLASS_PROPERTY;
/** JDBC Database access URL */
public static final String URL_PROPERTY =
org.apache.hadoop.mapreduce.lib.db.DBConfiguration.URL_PROPERTY;
/** User name to access the database */
public static final String USERNAME_PROPERTY =
org.apache.hadoop.mapreduce.lib.db.DBConfiguration.USERNAME_PROPERTY;
/** Password to access the database */
public static final String PASSWORD_PROPERTY =
org.apache.hadoop.mapreduce.lib.db.DBConfiguration.PASSWORD_PROPERTY;
/** Input table name */
public static final String INPUT_TABLE_NAME_PROPERTY = org.apache.hadoop.
mapreduce.lib.db.DBConfiguration.INPUT_TABLE_NAME_PROPERTY;
/** Field names in the Input table */
public static final String INPUT_FIELD_NAMES_PROPERTY = org.apache.hadoop.
mapreduce.lib.db.DBConfiguration.INPUT_FIELD_NAMES_PROPERTY;
/** WHERE clause in the input SELECT statement */
public static final String INPUT_CONDITIONS_PROPERTY = org.apache.hadoop.
mapreduce.lib.db.DBConfiguration.INPUT_CONDITIONS_PROPERTY;
/** ORDER BY clause in the input SELECT statement */
public static final String INPUT_ORDER_BY_PROPERTY = org.apache.hadoop.
mapreduce.lib.db.DBConfiguration.INPUT_ORDER_BY_PROPERTY;
  /** Whole input query, excluding LIMIT...OFFSET */
public static final String INPUT_QUERY =
org.apache.hadoop.mapreduce.lib.db.DBConfiguration.INPUT_QUERY;
/** Input query to get the count of records */
public static final String INPUT_COUNT_QUERY =
org.apache.hadoop.mapreduce.lib.db.DBConfiguration.INPUT_COUNT_QUERY;
/** Class name implementing DBWritable which will hold input tuples */
public static final String INPUT_CLASS_PROPERTY =
org.apache.hadoop.mapreduce.lib.db.DBConfiguration.INPUT_CLASS_PROPERTY;
/** Output table name */
public static final String OUTPUT_TABLE_NAME_PROPERTY = org.apache.hadoop.
mapreduce.lib.db.DBConfiguration.OUTPUT_TABLE_NAME_PROPERTY;
/** Field names in the Output table */
public static final String OUTPUT_FIELD_NAMES_PROPERTY = org.apache.hadoop.
mapreduce.lib.db.DBConfiguration.OUTPUT_FIELD_NAMES_PROPERTY;
/** Number of fields in the Output table */
public static final String OUTPUT_FIELD_COUNT_PROPERTY = org.apache.hadoop.
mapreduce.lib.db.DBConfiguration.OUTPUT_FIELD_COUNT_PROPERTY;
/**
* Sets the DB access related fields in the JobConf.
* @param job the job
* @param driverClass JDBC Driver class name
* @param dbUrl JDBC DB access URL.
* @param userName DB access username
* @param passwd DB access passwd
*/
public static void configureDB(JobConf job, String driverClass, String dbUrl
, String userName, String passwd) {
job.set(DRIVER_CLASS_PROPERTY, driverClass);
job.set(URL_PROPERTY, dbUrl);
if(userName != null)
job.set(USERNAME_PROPERTY, userName);
if(passwd != null)
job.set(PASSWORD_PROPERTY, passwd);
}
/**
* Sets the DB access related fields in the JobConf.
* @param job the job
* @param driverClass JDBC Driver class name
* @param dbUrl JDBC DB access URL.
*/
public static void configureDB(JobConf job, String driverClass, String dbUrl) {
configureDB(job, driverClass, dbUrl, null, null);
}
DBConfiguration(JobConf job) {
super(job);
}
}
| 4,635 | 37.31405 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueHistogram.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.aggregate;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This class implements a value aggregator that computes the
* histogram of a sequence of strings.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ValueHistogram
extends org.apache.hadoop.mapreduce.lib.aggregate.ValueHistogram
implements ValueAggregator<String> {
}
| 1,282 | 36.735294 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/DoubleValueSum.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.aggregate;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This class implements a value aggregator that sums up a sequence of double
* values.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class DoubleValueSum
extends org.apache.hadoop.mapreduce.lib.aggregate.DoubleValueSum
implements ValueAggregator<String> {
}
| 1,269 | 35.285714 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJobBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.aggregate;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.Reducer;
/**
 * This abstract class implements some common functionality of the
 * generic mapper, reducer and combiner classes of Aggregate.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class ValueAggregatorJobBase<K1 extends WritableComparable,
V1 extends Writable>
implements Mapper<K1, V1, Text, Text>, Reducer<Text, Text, Text, Text> {
protected ArrayList<ValueAggregatorDescriptor> aggregatorDescriptorList = null;
public void configure(JobConf job) {
this.initializeMySpec(job);
this.logSpec();
}
private static ValueAggregatorDescriptor getValueAggregatorDescriptor(
String spec, JobConf job) {
if (spec == null)
return null;
String[] segments = spec.split(",", -1);
String type = segments[0];
if (type.compareToIgnoreCase("UserDefined") == 0) {
String className = segments[1];
return new UserDefinedValueAggregatorDescriptor(className, job);
}
return null;
}
private static ArrayList<ValueAggregatorDescriptor> getAggregatorDescriptors(JobConf job) {
String advn = "aggregator.descriptor";
int num = job.getInt(advn + ".num", 0);
ArrayList<ValueAggregatorDescriptor> retv = new ArrayList<ValueAggregatorDescriptor>(num);
for (int i = 0; i < num; i++) {
String spec = job.get(advn + "." + i);
ValueAggregatorDescriptor ad = getValueAggregatorDescriptor(spec, job);
if (ad != null) {
retv.add(ad);
}
}
return retv;
}
private void initializeMySpec(JobConf job) {
this.aggregatorDescriptorList = getAggregatorDescriptors(job);
if (this.aggregatorDescriptorList.size() == 0) {
this.aggregatorDescriptorList
.add(new UserDefinedValueAggregatorDescriptor(
ValueAggregatorBaseDescriptor.class.getCanonicalName(), job));
}
}
protected void logSpec() {
}
public void close() throws IOException {
}
}
| 3,235 | 33.795699 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/UserDefinedValueAggregatorDescriptor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.aggregate;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.JobConf;
/**
* This class implements a wrapper for a user defined value aggregator
* descriptor.
 * It serves two functions: one is to create an object of
 * ValueAggregatorDescriptor from the name of a user defined class that may be
 * dynamically loaded. The other is to delegate invocations of the
 * generateKeyValPairs function to the created object.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class UserDefinedValueAggregatorDescriptor extends org.apache.hadoop.
mapreduce.lib.aggregate.UserDefinedValueAggregatorDescriptor
implements ValueAggregatorDescriptor {
/**
* Create an instance of the given class
* @param className the name of the class
* @return a dynamically created instance of the given class
*/
public static Object createInstance(String className) {
return org.apache.hadoop.mapreduce.lib.aggregate.
UserDefinedValueAggregatorDescriptor.createInstance(className);
}
/**
*
* @param className the class name of the user defined descriptor class
   * @param job a configuration object used for descriptor configuration
*/
public UserDefinedValueAggregatorDescriptor(String className, JobConf job) {
super(className, job);
((ValueAggregatorDescriptor)theAggregatorDescriptor).configure(job);
}
/**
* Do nothing.
*/
public void configure(JobConf job) {
}
}
| 2,381 | 34.552239 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorBaseDescriptor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.aggregate;
import java.util.Map.Entry;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
/**
 * This class implements the common functionality of
 * the subclasses of the ValueAggregatorDescriptor class.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ValueAggregatorBaseDescriptor extends org.apache.hadoop.mapreduce.
lib.aggregate.ValueAggregatorBaseDescriptor
implements ValueAggregatorDescriptor {
static public final String UNIQ_VALUE_COUNT = org.apache.hadoop.mapreduce.
lib.aggregate.ValueAggregatorBaseDescriptor.UNIQ_VALUE_COUNT;
static public final String LONG_VALUE_SUM = org.apache.hadoop.mapreduce.
lib.aggregate.ValueAggregatorBaseDescriptor.LONG_VALUE_SUM;
static public final String DOUBLE_VALUE_SUM = org.apache.hadoop.mapreduce.
lib.aggregate.ValueAggregatorBaseDescriptor.DOUBLE_VALUE_SUM;
static public final String VALUE_HISTOGRAM = org.apache.hadoop.mapreduce.
lib.aggregate.ValueAggregatorBaseDescriptor.VALUE_HISTOGRAM;
static public final String LONG_VALUE_MAX = org.apache.hadoop.mapreduce.
lib.aggregate.ValueAggregatorBaseDescriptor.LONG_VALUE_MAX;
static public final String LONG_VALUE_MIN = org.apache.hadoop.mapreduce.
lib.aggregate.ValueAggregatorBaseDescriptor.LONG_VALUE_MIN;
static public final String STRING_VALUE_MAX = org.apache.hadoop.mapreduce.
lib.aggregate.ValueAggregatorBaseDescriptor.STRING_VALUE_MAX;
static public final String STRING_VALUE_MIN = org.apache.hadoop.mapreduce.
lib.aggregate.ValueAggregatorBaseDescriptor.STRING_VALUE_MIN;
private static long maxNumItems = Long.MAX_VALUE;
/**
*
* @param type the aggregation type
* @param id the aggregation id
* @param val the val associated with the id to be aggregated
* @return an Entry whose key is the aggregation id prefixed with
* the aggregation type.
*/
public static Entry<Text, Text> generateEntry(String type, String id, Text val) {
return org.apache.hadoop.mapreduce.lib.aggregate.
ValueAggregatorBaseDescriptor.generateEntry(type, id, val);
}
/**
*
* @param type the aggregation type
* @return a value aggregator of the given type.
*/
static public ValueAggregator generateValueAggregator(String type) {
ValueAggregator retv = null;
if (type.compareToIgnoreCase(LONG_VALUE_SUM) == 0) {
retv = new LongValueSum();
    } else if (type.compareToIgnoreCase(LONG_VALUE_MAX) == 0) {
retv = new LongValueMax();
} else if (type.compareToIgnoreCase(LONG_VALUE_MIN) == 0) {
retv = new LongValueMin();
} else if (type.compareToIgnoreCase(STRING_VALUE_MAX) == 0) {
retv = new StringValueMax();
} else if (type.compareToIgnoreCase(STRING_VALUE_MIN) == 0) {
retv = new StringValueMin();
} else if (type.compareToIgnoreCase(DOUBLE_VALUE_SUM) == 0) {
retv = new DoubleValueSum();
} else if (type.compareToIgnoreCase(UNIQ_VALUE_COUNT) == 0) {
retv = new UniqValueCount(maxNumItems);
} else if (type.compareToIgnoreCase(VALUE_HISTOGRAM) == 0) {
retv = new ValueHistogram();
}
return retv;
}
/**
   * Configure this descriptor and read the limit on the number of unique
   * values ("aggregate.max.num.unique.values").
*
* @param job a job configuration object
*/
public void configure(JobConf job) {
super.configure(job);
maxNumItems = job.getLong("aggregate.max.num.unique.values",
Long.MAX_VALUE);
}
}
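A hedged example of a user plugin built on this base class; the class name and the "token-length" aggregation id are invented. Each generated key carries a type prefix (via generateEntry) that tells the generic combiner/reducer how to fold the values.

import java.util.ArrayList;
import java.util.Map.Entry;
import java.util.StringTokenizer;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor;
import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor;

public class WordCountPlugInClass extends ValueAggregatorBaseDescriptor {
  // For every token in the input line, request a running count keyed by the
  // token and a histogram of token lengths; the generic mapper/combiner/reducer
  // perform the actual aggregation based on the type prefix of each key.
  public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key, Object val) {
    ArrayList<Entry<Text, Text>> retv = new ArrayList<Entry<Text, Text>>();
    StringTokenizer itr = new StringTokenizer(val.toString());
    while (itr.hasMoreTokens()) {
      String token = itr.nextToken();
      retv.add(generateEntry(LONG_VALUE_SUM, token, ValueAggregatorDescriptor.ONE));
      retv.add(generateEntry(VALUE_HISTOGRAM, "token-length",
          new Text(String.valueOf(token.length()))));
    }
    return retv;
  }
}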
| 4,393 | 37.208696 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/LongValueMax.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.aggregate;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * This class implements a value aggregator that maintains the maximum of
* a sequence of long values.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class LongValueMax
extends org.apache.hadoop.mapreduce.lib.aggregate.LongValueMax
implements ValueAggregator<String> {
}
| 1,280 | 35.6 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorReducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.aggregate;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
/**
* This class implements the generic reducer of Aggregate.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ValueAggregatorReducer<K1 extends WritableComparable,
V1 extends Writable>
extends ValueAggregatorJobBase<K1, V1> {
/**
* @param key
* the key is expected to be a Text object, whose prefix indicates
   *          the type of aggregation to aggregate the values. In effect,
   *          data-driven computing is achieved. It is assumed that each
   *          aggregator's getReport method emits appropriate output for the
   *          aggregator. This may be further customized.
* @param values
* the values to be aggregated
*/
public void reduce(Text key, Iterator<Text> values,
OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
String keyStr = key.toString();
int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR);
String type = keyStr.substring(0, pos);
keyStr = keyStr.substring(pos
+ ValueAggregatorDescriptor.TYPE_SEPARATOR.length());
ValueAggregator aggregator = ValueAggregatorBaseDescriptor
.generateValueAggregator(type);
while (values.hasNext()) {
aggregator.addNextValue(values.next());
}
String val = aggregator.getReport();
key = new Text(keyStr);
output.collect(key, new Text(val));
}
/**
* Do nothing. Should not be called
*/
public void map(K1 arg0, V1 arg1, OutputCollector<Text, Text> arg2,
Reporter arg3) throws IOException {
throw new IOException ("should not be called\n");
}
}
| 2,937 | 36.666667 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/StringValueMin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.aggregate;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * This class implements a value aggregator that maintains the smallest of
* a sequence of strings.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class StringValueMin
extends org.apache.hadoop.mapreduce.lib.aggregate.StringValueMin
implements ValueAggregator<String> {
}
| 1,281 | 36.705882 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.aggregate;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map.Entry;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
/**
* This class implements the generic mapper of Aggregate.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ValueAggregatorMapper<K1 extends WritableComparable,
V1 extends Writable>
extends ValueAggregatorJobBase<K1, V1> {
/**
* the map function. It iterates through the value aggregator descriptor
* list to generate aggregation id/value pairs and emit them.
*/
public void map(K1 key, V1 value,
OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
Iterator iter = this.aggregatorDescriptorList.iterator();
while (iter.hasNext()) {
ValueAggregatorDescriptor ad = (ValueAggregatorDescriptor) iter.next();
Iterator<Entry<Text, Text>> ens =
ad.generateKeyValPairs(key, value).iterator();
while (ens.hasNext()) {
Entry<Text, Text> en = ens.next();
output.collect(en.getKey(), en.getValue());
}
}
}
/**
* Do nothing. Should not be called.
*/
public void reduce(Text arg0, Iterator<Text> arg1,
OutputCollector<Text, Text> arg2,
Reporter arg3) throws IOException {
throw new IOException("should not be called\n");
}
}
| 2,525 | 35.085714 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorCombiner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.aggregate;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
/**
* This class implements the generic combiner of Aggregate.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ValueAggregatorCombiner<K1 extends WritableComparable,
V1 extends Writable>
extends ValueAggregatorJobBase<K1, V1> {
/**
* Combiner does not need to configure.
*/
public void configure(JobConf job) {
}
/** Combines values for a given key.
* @param key the key is expected to be a Text object, whose prefix indicates
* the type of aggregation to aggregate the values.
* @param values the values to combine
* @param output to collect combined values
*/
public void reduce(Text key, Iterator<Text> values,
OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
String keyStr = key.toString();
int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR);
String type = keyStr.substring(0, pos);
ValueAggregator aggregator = ValueAggregatorBaseDescriptor
.generateValueAggregator(type);
while (values.hasNext()) {
aggregator.addNextValue(values.next());
}
Iterator outputs = aggregator.getCombinerOutput().iterator();
while (outputs.hasNext()) {
Object v = outputs.next();
if (v instanceof Text) {
output.collect(key, (Text)v);
} else {
output.collect(key, new Text(v.toString()));
}
}
}
/**
* Do nothing.
*
*/
public void close() throws IOException {
}
/**
* Do nothing. Should not be called.
*
*/
public void map(K1 arg0, V1 arg1, OutputCollector<Text, Text> arg2,
Reporter arg3) throws IOException {
throw new IOException ("should not be called\n");
}
}
| 3,046 | 31.414894 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/LongValueSum.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.aggregate;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This class implements a value aggregator that sums up
* a sequence of long values.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class LongValueSum
extends org.apache.hadoop.mapreduce.lib.aggregate.LongValueSum
implements ValueAggregator<String> {
}
| 1,266 | 34.194444 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.aggregate;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.mapred.jobcontrol.Job;
import org.apache.hadoop.mapred.jobcontrol.JobControl;
import org.apache.hadoop.util.GenericOptionsParser;
/**
 * This is the main class for creating a map/reduce job using the Aggregate
 * framework. Aggregate is a specialization of the map/reduce framework for
 * performing various simple aggregations.
 *
 * Generally speaking, in order to implement an application using the
 * Map/Reduce model, the developer has to implement Map and Reduce functions
 * (and possibly a combine function). However, a lot of applications related
 * to counting and statistics computing have very similar characteristics.
 * Aggregate abstracts out the general patterns of these functions and
 * implements those patterns. In particular, the package provides generic
 * mapper/reducer/combiner classes, a set of built-in value aggregators, and a
 * generic utility class that helps users create map/reduce jobs using the
 * generic classes. The built-in aggregators include:
 *
 * sum over numeric values, count of distinct values, histogram of values, and
 * the minimum, maximum, median, average and standard deviation of numeric
 * values.
*
* The developer using Aggregate will need only to provide a plugin class
* conforming to the following interface:
*
* public interface ValueAggregatorDescriptor { public ArrayList<Entry>
* generateKeyValPairs(Object key, Object value); public void
 * configure(JobConf job); }
*
* The package also provides a base class, ValueAggregatorBaseDescriptor,
* implementing the above interface. The user can extend the base class and
* implement generateKeyValPairs accordingly.
*
* The primary work of generateKeyValPairs is to emit one or more key/value
* pairs based on the input key/value pair. The key in an output key/value pair
* encode two pieces of information: aggregation type and aggregation id. The
* value will be aggregated onto the aggregation id according the aggregation
* type.
*
 * This class offers a function to generate a map/reduce job using the
 * Aggregate framework. The function takes the following parameters: the input
 * directory spec, the input format (text or sequence file), the output
 * directory, and a file specifying the user plugin class.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ValueAggregatorJob {
public static JobControl createValueAggregatorJobs(String args[]
, Class<? extends ValueAggregatorDescriptor>[] descriptors) throws IOException {
JobControl theControl = new JobControl("ValueAggregatorJobs");
ArrayList<Job> dependingJobs = new ArrayList<Job>();
JobConf aJobConf = createValueAggregatorJob(args);
if(descriptors != null)
setAggregatorDescriptors(aJobConf, descriptors);
Job aJob = new Job(aJobConf, dependingJobs);
theControl.addJob(aJob);
return theControl;
}
public static JobControl createValueAggregatorJobs(String args[]) throws IOException {
return createValueAggregatorJobs(args, null);
}
/**
* Create an Aggregate based map/reduce job.
*
* @param args the arguments used for job creation. Generic hadoop
* arguments are accepted.
   * @param caller the caller class.
* @return a JobConf object ready for submission.
*
* @throws IOException
* @see GenericOptionsParser
*/
@SuppressWarnings("rawtypes")
public static JobConf createValueAggregatorJob(String args[], Class<?> caller)
throws IOException {
Configuration conf = new Configuration();
GenericOptionsParser genericParser
= new GenericOptionsParser(conf, args);
args = genericParser.getRemainingArgs();
if (args.length < 2) {
System.out.println("usage: inputDirs outDir "
+ "[numOfReducer [textinputformat|seq [specfile [jobName]]]]");
GenericOptionsParser.printGenericCommandUsage(System.out);
System.exit(1);
}
String inputDir = args[0];
String outputDir = args[1];
int numOfReducers = 1;
if (args.length > 2) {
numOfReducers = Integer.parseInt(args[2]);
}
Class<? extends InputFormat> theInputFormat =
TextInputFormat.class;
if (args.length > 3 &&
args[3].compareToIgnoreCase("textinputformat") == 0) {
theInputFormat = TextInputFormat.class;
} else {
theInputFormat = SequenceFileInputFormat.class;
}
Path specFile = null;
if (args.length > 4) {
specFile = new Path(args[4]);
}
String jobName = "";
if (args.length > 5) {
jobName = args[5];
}
JobConf theJob = new JobConf(conf);
if (specFile != null) {
theJob.addResource(specFile);
}
String userJarFile = theJob.get("user.jar.file");
if (userJarFile == null) {
theJob.setJarByClass(caller != null ? caller : ValueAggregatorJob.class);
} else {
theJob.setJar(userJarFile);
}
theJob.setJobName("ValueAggregatorJob: " + jobName);
FileInputFormat.addInputPaths(theJob, inputDir);
theJob.setInputFormat(theInputFormat);
theJob.setMapperClass(ValueAggregatorMapper.class);
FileOutputFormat.setOutputPath(theJob, new Path(outputDir));
theJob.setOutputFormat(TextOutputFormat.class);
theJob.setMapOutputKeyClass(Text.class);
theJob.setMapOutputValueClass(Text.class);
theJob.setOutputKeyClass(Text.class);
theJob.setOutputValueClass(Text.class);
theJob.setReducerClass(ValueAggregatorReducer.class);
theJob.setCombinerClass(ValueAggregatorCombiner.class);
theJob.setNumMapTasks(1);
theJob.setNumReduceTasks(numOfReducers);
return theJob;
}
/**
* Create an Aggregate based map/reduce job.
*
* @param args the arguments used for job creation. Generic hadoop
* arguments are accepted.
* @return a JobConf object ready for submission.
*
* @throws IOException
* @see GenericOptionsParser
*/
public static JobConf createValueAggregatorJob(String args[])
throws IOException {
return createValueAggregatorJob(args, ValueAggregator.class);
}
public static JobConf createValueAggregatorJob(String args[]
, Class<? extends ValueAggregatorDescriptor>[] descriptors)
throws IOException {
JobConf job = createValueAggregatorJob(args);
setAggregatorDescriptors(job, descriptors);
return job;
}
public static void setAggregatorDescriptors(JobConf job
, Class<? extends ValueAggregatorDescriptor>[] descriptors) {
job.setInt("aggregator.descriptor.num", descriptors.length);
//specify the aggregator descriptors
for(int i=0; i< descriptors.length; i++) {
job.set("aggregator.descriptor." + i, "UserDefined," + descriptors[i].getName());
}
}
public static JobConf createValueAggregatorJob(String args[],
Class<? extends ValueAggregatorDescriptor>[] descriptors,
Class<?> caller) throws IOException {
JobConf job = createValueAggregatorJob(args, caller);
setAggregatorDescriptors(job, descriptors);
return job;
}
/**
* create and run an Aggregate based map/reduce job.
*
* @param args the arguments used for job creation
* @throws IOException
*/
public static void main(String args[]) throws IOException {
JobConf job = ValueAggregatorJob.createValueAggregatorJob(args);
JobClient.runJob(job);
}
}
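A hedged launcher sketch; the descriptor array here just reuses the built-in base descriptor, where a real job would plug in a user-defined descriptor class.

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor;
import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor;
import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob;

public class AggregateLauncher {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) throws Exception {
    // args: inputDirs outDir [numOfReducer [textinputformat|seq [specfile [jobName]]]]
    Class<? extends ValueAggregatorDescriptor>[] descriptors =
        new Class[] { ValueAggregatorBaseDescriptor.class };  // or a user plugin class
    JobConf job = ValueAggregatorJob.createValueAggregatorJob(args, descriptors);
    // Equivalent to setting "aggregator.descriptor.num" and each
    // "aggregator.descriptor.<i>" = "UserDefined,<descriptor class name>" by hand.
    JobClient.runJob(job);
  }
}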
| 8,876 | 36.142259 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorDescriptor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.aggregate;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
/**
* This interface defines the contract a value aggregator descriptor must
* support. Such a descriptor can be configured with a JobConf object. Its main
* function is to generate a list of aggregation-id/value pairs. An aggregation
* id encodes an aggregation type which is used to guide the way to aggregate
 * the value in the reduce/combiner phase of an Aggregate based job. The mapper in
* an Aggregate based map/reduce job may create one or more of
* ValueAggregatorDescriptor objects at configuration time. For each input
* key/value pair, the mapper will use those objects to create aggregation
* id/value pairs.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface ValueAggregatorDescriptor extends
org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorDescriptor {
public static final String TYPE_SEPARATOR = org.apache.hadoop.mapreduce.
lib.aggregate.ValueAggregatorDescriptor.TYPE_SEPARATOR;
public static final Text ONE = org.apache.hadoop.mapreduce.
lib.aggregate.ValueAggregatorDescriptor.ONE;
/**
* Configure the object
*
* @param job
* a JobConf object that may contain the information that can be used
* to configure the object.
*/
public void configure(JobConf job);
}
| 2,339 | 40.052632 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.aggregate;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This interface defines the minimal protocol for value aggregators.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface ValueAggregator<E> extends
org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregator<E> {
}
| 1,222 | 37.21875 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/StringValueMax.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.aggregate;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * This class implements a value aggregator that maintains the maximum of
* a sequence of strings.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class StringValueMax
extends org.apache.hadoop.mapreduce.lib.aggregate.StringValueMax
implements ValueAggregator<String> {
}
| 1,280 | 36.676471 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/UniqValueCount.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.aggregate;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * This class implements a value aggregator that dedupes a sequence of objects
 * and reports the number of unique values seen.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class UniqValueCount
extends org.apache.hadoop.mapreduce.lib.aggregate.UniqValueCount
implements ValueAggregator<Object> {
/**
   * The default constructor.
*
*/
public UniqValueCount() {
super();
}
/**
   * Constructor.
   * @param maxNum the limit on the number of unique values to keep.
*
*/
public UniqValueCount(long maxNum) {
super(maxNum);
}
}
| 1,522 | 30.081633 | 79 |
java
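A small usage sketch for the class above, assuming only the ValueAggregator protocol methods addNextValue and getReport that the aggregate classes in this dump share; the class name and sample values are made up, and the maxNum constructor argument caps how many distinct values are tracked.

import org.apache.hadoop.mapred.lib.aggregate.UniqValueCount;

public class UniqValueCountSketch {
  public static void main(String[] args) {
    // Track at most 100 distinct values; duplicates are collapsed.
    UniqValueCount unique = new UniqValueCount(100);
    for (String s : new String[] {"apple", "pear", "apple", "plum", "pear"}) {
      unique.addNextValue(s);
    }
    // getReport() returns the aggregated result as a string
    // (for this aggregator, the count of distinct values seen: here 3).
    System.out.println(unique.getReport());
  }
}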
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/LongValueMin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.aggregate;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * This class implements a value aggregator that maintains the minimum of
* a sequence of long values.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class LongValueMin
extends org.apache.hadoop.mapreduce.lib.aggregate.LongValueMin
implements ValueAggregator<String> {
}
| 1,280 | 36.676471 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.junit.Assert.assertEquals;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.Test;
public class TestMapReduceTrackingUriPlugin {
@Test
public void testProducesHistoryServerUriForAppId() throws URISyntaxException {
final String historyAddress = "example.net:424242";
YarnConfiguration conf = new YarnConfiguration();
conf.set(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, historyAddress);
MapReduceTrackingUriPlugin plugin = new MapReduceTrackingUriPlugin();
plugin.setConf(conf);
ApplicationId id = ApplicationId.newInstance(6384623l, 5);
String jobSuffix = id.toString().replaceFirst("^application_", "job_");
URI expected =
new URI("http://" + historyAddress + "/jobhistory/job/" + jobSuffix);
URI actual = plugin.getTrackingUri(id);
assertEquals(expected, actual);
}
}
| 1,883 | 39.956522 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/MapReduceTrackingUriPlugin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.util.TrackingUriPlugin;
public class MapReduceTrackingUriPlugin extends TrackingUriPlugin implements
Configurable {
@Override
public void setConf(Configuration conf) {
Configuration jobConf = null;
// Force loading of mapred configuration.
if (conf != null) {
jobConf = new JobConf(conf);
} else {
jobConf = new JobConf();
}
super.setConf(jobConf);
}
/**
   * Gets the URI to access the given application on the MapReduce history server.
* @param id the ID for which a URI is returned
* @return the tracking URI
* @throws URISyntaxException
*/
@Override
public URI getTrackingUri(ApplicationId id) throws URISyntaxException {
String jobSuffix = id.toString().replaceFirst("^application_", "job_");
String historyServerAddress =
MRWebAppUtil.getJHSWebappURLWithScheme(getConf());
    return new URI(historyServerAddress + "/jobhistory/job/" + jobSuffix);
}
}
| 2,113 | 33.655738 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/TestWordStats.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples;
import static org.junit.Assert.assertEquals;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.StringTokenizer;
import java.util.TreeMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Before;
import org.junit.Test;
public class TestWordStats {
private final static String INPUT = "src/test/java/org/apache/hadoop/examples/pi/math";
private final static String MEAN_OUTPUT = "build/data/mean_output";
private final static String MEDIAN_OUTPUT = "build/data/median_output";
private final static String STDDEV_OUTPUT = "build/data/stddev_output";
/**
* Modified internal test class that is designed to read all the files in the
   * input directory, and find the standard deviation of all the word
* lengths.
*/
public static class WordStdDevReader {
private long wordsRead = 0;
private long wordLengthsRead = 0;
private long wordLengthsReadSquared = 0;
public WordStdDevReader() {
}
public double read(String path) throws IOException {
FileSystem fs = FileSystem.get(new Configuration());
FileStatus[] files = fs.listStatus(new Path(path));
for (FileStatus fileStat : files) {
if (!fileStat.isFile())
continue;
BufferedReader br = null;
try {
br = new BufferedReader(new InputStreamReader(fs.open(fileStat.getPath())));
String line;
while ((line = br.readLine()) != null) {
StringTokenizer st = new StringTokenizer(line);
String word;
while (st.hasMoreTokens()) {
word = st.nextToken();
this.wordsRead++;
this.wordLengthsRead += word.length();
this.wordLengthsReadSquared += (long) Math.pow(word.length(), 2.0);
}
}
} catch (IOException e) {
System.out.println("Output could not be read!");
throw e;
} finally {
br.close();
}
}
double mean = (((double) this.wordLengthsRead) / ((double) this.wordsRead));
mean = Math.pow(mean, 2.0);
double term = (((double) this.wordLengthsReadSquared / ((double) this.wordsRead)));
double stddev = Math.sqrt((term - mean));
return stddev;
}
}
/**
* Modified internal test class that is designed to read all the files in the
* input directory, and find the median length of all the words.
*/
public static class WordMedianReader {
private long wordsRead = 0;
private TreeMap<Integer, Integer> map = new TreeMap<Integer, Integer>();
public WordMedianReader() {
}
public double read(String path) throws IOException {
FileSystem fs = FileSystem.get(new Configuration());
FileStatus[] files = fs.listStatus(new Path(path));
int num = 0;
for (FileStatus fileStat : files) {
if (!fileStat.isFile())
continue;
BufferedReader br = null;
try {
br = new BufferedReader(new InputStreamReader(fs.open(fileStat.getPath())));
String line;
while ((line = br.readLine()) != null) {
StringTokenizer st = new StringTokenizer(line);
String word;
while (st.hasMoreTokens()) {
word = st.nextToken();
this.wordsRead++;
if (this.map.get(word.length()) == null) {
this.map.put(word.length(), 1);
} else {
int count = this.map.get(word.length());
this.map.put(word.length(), count + 1);
}
}
}
} catch (IOException e) {
System.out.println("Output could not be read!");
throw e;
} finally {
br.close();
}
}
int medianIndex1 = (int) Math.ceil((this.wordsRead / 2.0));
int medianIndex2 = (int) Math.floor((this.wordsRead / 2.0));
for (Integer key : this.map.navigableKeySet()) {
int prevNum = num;
num += this.map.get(key);
if (medianIndex2 >= prevNum && medianIndex1 <= num) {
return key;
} else if (medianIndex2 >= prevNum && medianIndex1 < num) {
Integer nextCurrLen = this.map.navigableKeySet().iterator().next();
double median = (key + nextCurrLen) / 2.0;
return median;
}
}
return -1;
}
}
/**
* Modified internal test class that is designed to read all the files in the
* input directory, and find the mean length of all the words.
*/
public static class WordMeanReader {
private long wordsRead = 0;
private long wordLengthsRead = 0;
public WordMeanReader() {
}
public double read(String path) throws IOException {
FileSystem fs = FileSystem.get(new Configuration());
FileStatus[] files = fs.listStatus(new Path(path));
for (FileStatus fileStat : files) {
if (!fileStat.isFile())
continue;
BufferedReader br = null;
try {
br = new BufferedReader(new InputStreamReader(fs.open(fileStat.getPath())));
String line;
while ((line = br.readLine()) != null) {
StringTokenizer st = new StringTokenizer(line);
String word;
while (st.hasMoreTokens()) {
word = st.nextToken();
this.wordsRead++;
this.wordLengthsRead += word.length();
}
}
} catch (IOException e) {
System.out.println("Output could not be read!");
throw e;
} finally {
br.close();
}
}
double mean = (((double) this.wordLengthsRead) / ((double) this.wordsRead));
return mean;
}
}
/**
   * Internal helper method designed to delete the output directory. Meant solely for
* use before and after the test is run; this is so next iterations of the
* test do not encounter a "file already exists" error.
*
* @param dir
* The directory to delete.
* @return Returns whether the deletion was successful or not.
*/
public static boolean deleteDir(File dir) {
if (dir.isDirectory()) {
String[] children = dir.list();
for (int i = 0; i < children.length; i++) {
boolean success = deleteDir(new File(dir, children[i]));
if (!success) {
System.out.println("Could not delete directory after test!");
return false;
}
}
}
// The directory is now empty so delete it
return dir.delete();
}
@Before public void setup() throws Exception {
deleteDir(new File(MEAN_OUTPUT));
deleteDir(new File(MEDIAN_OUTPUT));
deleteDir(new File(STDDEV_OUTPUT));
}
@Test public void testGetTheMean() throws Exception {
String args[] = new String[2];
args[0] = INPUT;
args[1] = MEAN_OUTPUT;
WordMean wm = new WordMean();
ToolRunner.run(new Configuration(), wm, args);
double mean = wm.getMean();
// outputs MUST match
WordMeanReader wr = new WordMeanReader();
assertEquals(mean, wr.read(INPUT), 0.0);
}
@Test public void testGetTheMedian() throws Exception {
String args[] = new String[2];
args[0] = INPUT;
args[1] = MEDIAN_OUTPUT;
WordMedian wm = new WordMedian();
ToolRunner.run(new Configuration(), wm, args);
double median = wm.getMedian();
// outputs MUST match
WordMedianReader wr = new WordMedianReader();
assertEquals(median, wr.read(INPUT), 0.0);
}
@Test public void testGetTheStandardDeviation() throws Exception {
String args[] = new String[2];
args[0] = INPUT;
args[1] = STDDEV_OUTPUT;
WordStandardDeviation wsd = new WordStandardDeviation();
ToolRunner.run(new Configuration(), wsd, args);
double stddev = wsd.getStandardDeviation();
// outputs MUST match
WordStdDevReader wr = new WordStdDevReader();
assertEquals(stddev, wr.read(INPUT), 0.0);
}
}
| 8,968 | 29.927586 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/TestBaileyBorweinPlouffe.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples;
import java.math.BigInteger;
/** Tests for BaileyBorweinPlouffe */
public class TestBaileyBorweinPlouffe extends junit.framework.TestCase {
public void testMod() {
final BigInteger TWO = BigInteger.ONE.add(BigInteger.ONE);
for(long n = 3; n < 100; n++) {
for (long e = 1; e < 100; e++) {
final long r = TWO.modPow(
BigInteger.valueOf(e), BigInteger.valueOf(n)).longValue();
assertEquals("e=" + e + ", n=" + n, r, BaileyBorweinPlouffe.mod(e, n));
}
}
}
public void testHexDigit() {
final long[] answers = {0x43F6, 0xA308, 0x29B7, 0x49F1, 0x8AC8, 0x35EA};
long d = 1;
for(int i = 0; i < answers.length; i++) {
assertEquals("d=" + d, answers[i], BaileyBorweinPlouffe.hexDigits(d));
d *= 10;
}
assertEquals(0x243FL, BaileyBorweinPlouffe.hexDigits(0));
}
}
| 1,690 | 35.76087 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.terasort;
import java.io.File;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileAlreadyExistsException;
import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.util.ToolRunner;
public class TestTeraSort extends HadoopTestCase {
private static Log LOG = LogFactory.getLog(TestTeraSort.class);
public TestTeraSort()
throws IOException {
super(LOCAL_MR, LOCAL_FS, 1, 1);
}
protected void tearDown() throws Exception {
getFileSystem().delete(new Path(TEST_DIR), true);
super.tearDown();
}
// Input/Output paths for sort
private static final String TEST_DIR =
new File(System.getProperty("test.build.data", "/tmp"), "terasort")
.getAbsolutePath();
private static final Path SORT_INPUT_PATH = new Path(TEST_DIR, "sortin");
private static final Path SORT_OUTPUT_PATH = new Path(TEST_DIR, "sortout");
private static final Path TERA_OUTPUT_PATH = new Path(TEST_DIR, "validate");
private static final String NUM_ROWS = "100";
private void runTeraGen(Configuration conf, Path sortInput)
throws Exception {
String[] genArgs = {NUM_ROWS, sortInput.toString()};
// Run TeraGen
assertEquals(ToolRunner.run(conf, new TeraGen(), genArgs), 0);
}
private void runTeraSort(Configuration conf,
Path sortInput, Path sortOutput) throws Exception {
// Setup command-line arguments to 'sort'
String[] sortArgs = {sortInput.toString(), sortOutput.toString()};
// Run Sort
assertEquals(ToolRunner.run(conf, new TeraSort(), sortArgs), 0);
}
private void runTeraValidator(Configuration job,
Path sortOutput, Path valOutput)
throws Exception {
String[] svArgs = {sortOutput.toString(), valOutput.toString()};
// Run Tera-Validator
assertEquals(ToolRunner.run(job, new TeraValidate(), svArgs), 0);
}
public void testTeraSort() throws Exception {
// Run TeraGen to generate input for 'terasort'
runTeraGen(createJobConf(), SORT_INPUT_PATH);
// Run teragen again to check for FAE
try {
runTeraGen(createJobConf(), SORT_INPUT_PATH);
fail("Teragen output overwritten!");
} catch (FileAlreadyExistsException fae) {
LOG.info("Expected exception: ", fae);
}
// Run terasort
runTeraSort(createJobConf(), SORT_INPUT_PATH, SORT_OUTPUT_PATH);
// Run terasort again to check for FAE
try {
runTeraSort(createJobConf(), SORT_INPUT_PATH, SORT_OUTPUT_PATH);
fail("Terasort output overwritten!");
} catch (FileAlreadyExistsException fae) {
LOG.info("Expected exception: ", fae);
}
// Run tera-validator to check if sort worked correctly
runTeraValidator(createJobConf(), SORT_OUTPUT_PATH,
TERA_OUTPUT_PATH);
}
public void testTeraSortWithLessThanTwoArgs() throws Exception {
String[] args = new String[1];
assertEquals(new TeraSort().run(args), 2);
}
}
| 3,934 | 33.823009 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestModular.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.pi.math;
import java.math.BigInteger;
import java.util.Random;
import org.apache.hadoop.examples.pi.Util.Timer;
public class TestModular extends junit.framework.TestCase {
private static final Random RANDOM = new Random();
private static final BigInteger TWO = BigInteger.valueOf(2);
static final int DIV_VALID_BIT = 32;
static final long DIV_LIMIT = 1L << DIV_VALID_BIT;
// return r/n for n > r > 0
static long div(long sum, long r, long n) {
long q = 0;
int i = DIV_VALID_BIT - 1;
for(r <<= 1; r < n; r <<= 1) i--;
//System.out.printf(" r=%d, n=%d, q=%d\n", r, n, q);
for(; i >= 0 ;) {
r -= n;
q |= (1L << i);
if (r <= 0) break;
for(; r < n; r <<= 1) i--;
//System.out.printf(" r=%d, n=%d, q=%d\n", r, n, q);
}
sum += q;
return sum < DIV_LIMIT? sum: sum - DIV_LIMIT;
}
public void testDiv() {
for(long n = 2; n < 100; n++)
for(long r = 1; r < n; r++) {
final long a = div(0, r, n);
final long b = (long)((r*1.0/n) * (1L << DIV_VALID_BIT));
final String s = String.format("r=%d, n=%d, a=%X, b=%X", r, n, a, b);
assertEquals(s, b, a);
}
}
static long[][][] generateRN(int nsize, int rsize) {
final long[][][] rn = new long[nsize][][];
for(int i = 0; i < rn.length; i++) {
rn[i] = new long[rsize + 1][];
long n = RANDOM.nextLong() & 0xFFFFFFFFFFFFFFFL;
if (n <= 1) n = 0xFFFFFFFFFFFFFFFL - n;
rn[i][0] = new long[]{n};
final BigInteger N = BigInteger.valueOf(n);
for(int j = 1; j < rn[i].length; j++) {
long r = RANDOM.nextLong();
if (r < 0) r = -r;
if (r >= n) r %= n;
final BigInteger R = BigInteger.valueOf(r);
rn[i][j] = new long[]{r, R.multiply(R).mod(N).longValue()};
}
}
return rn;
}
static long square_slow(long z, final long n) {
long r = 0;
for(long s = z; z > 0; z >>= 1) {
if ((((int)z) & 1) == 1) {
r += s;
if (r >= n) r -= n;
}
s <<= 1;
if (s >= n) s -= n;
}
return r;
}
//0 <= r < n < max/2
static long square(long r, final long n, long r2p64) {
if (r <= Modular.MAX_SQRT_LONG) {
r *= r;
if (r >= n) r %= n;
} else {
final int HALF = (63 - Long.numberOfLeadingZeros(n)) >> 1;
final int FULL = HALF << 1;
final long ONES = (1 << HALF) - 1;
final long high = r >>> HALF;
final long low = r &= ONES;
r *= r;
if (r >= n) r %= n;
if (high != 0) {
long s = high * high;
if (s >= n) s %= n;
for(int i = 0; i < FULL; i++)
if ((s <<= 1) >= n) s -= n;
if (low == 0)
r = s;
else {
long t = high * low;
if (t >= n) t %= n;
for(int i = -1; i < HALF; i++)
if ((t <<= 1) >= n) t -= n;
r += s;
if (r >= n) r -= n;
r += t;
if (r >= n) r -= n;
}
}
}
return r;
}
static void squareBenchmarks() {
final Timer t = new Timer(false);
t.tick("squareBenchmarks(), MAX_SQRT=" + Modular.MAX_SQRT_LONG);
final long[][][] rn = generateRN(1000, 1000);
t.tick("generateRN");
for(int i = 0; i < rn.length; i++) {
final long n = rn[i][0][0];
for(int j = 1; j < rn[i].length; j++) {
final long r = rn[i][j][0];
final long answer = rn[i][j][1];
final long s = square_slow(r, n);
if (s != answer)
assertEquals("r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
}
}
t.tick("square_slow");
for(int i = 0; i < rn.length; i++) {
final long n = rn[i][0][0];
long r2p64 = (0x4000000000000000L % n) << 1;
if (r2p64 >= n) r2p64 -= n;
for(int j = 1; j < rn[i].length; j++) {
final long r = rn[i][j][0];
final long answer = rn[i][j][1];
final long s = square(r, n, r2p64);
if (s != answer)
assertEquals("r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
}
}
t.tick("square");
for(int i = 0; i < rn.length; i++) {
final long n = rn[i][0][0];
final BigInteger N = BigInteger.valueOf(n);
for(int j = 1; j < rn[i].length; j++) {
final long r = rn[i][j][0];
final long answer = rn[i][j][1];
final BigInteger R = BigInteger.valueOf(r);
final long s = R.multiply(R).mod(N).longValue();
if (s != answer)
assertEquals("r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
}
}
t.tick("R.multiply(R).mod(N)");
for(int i = 0; i < rn.length; i++) {
final long n = rn[i][0][0];
final BigInteger N = BigInteger.valueOf(n);
for(int j = 1; j < rn[i].length; j++) {
final long r = rn[i][j][0];
final long answer = rn[i][j][1];
final BigInteger R = BigInteger.valueOf(r);
final long s = R.modPow(TWO, N).longValue();
if (s != answer)
assertEquals("r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
}
}
t.tick("R.modPow(TWO, N)");
}
static long[][][] generateEN(int nsize, int esize) {
final long[][][] en = new long[nsize][][];
for(int i = 0; i < en.length; i++) {
en[i] = new long[esize + 1][];
long n = (RANDOM.nextLong() & 0xFFFFFFFFFFFFFFFL) | 1L;
if (n == 1) n = 3;
en[i][0] = new long[]{n};
final BigInteger N = BigInteger.valueOf(n);
for(int j = 1; j < en[i].length; j++) {
long e = RANDOM.nextLong();
if (e < 0) e = -e;
final BigInteger E = BigInteger.valueOf(e);
en[i][j] = new long[]{e, TWO.modPow(E, N).longValue()};
}
}
return en;
}
/** Compute $2^e \mod n$ for e > 0, n > 2 */
static long modBigInteger(final long e, final long n) {
long mask = (e & 0xFFFFFFFF00000000L) == 0 ? 0x00000000FFFFFFFFL
: 0xFFFFFFFF00000000L;
mask &= (e & 0xFFFF0000FFFF0000L & mask) == 0 ? 0x0000FFFF0000FFFFL
: 0xFFFF0000FFFF0000L;
mask &= (e & 0xFF00FF00FF00FF00L & mask) == 0 ? 0x00FF00FF00FF00FFL
: 0xFF00FF00FF00FF00L;
mask &= (e & 0xF0F0F0F0F0F0F0F0L & mask) == 0 ? 0x0F0F0F0F0F0F0F0FL
: 0xF0F0F0F0F0F0F0F0L;
mask &= (e & 0xCCCCCCCCCCCCCCCCL & mask) == 0 ? 0x3333333333333333L
: 0xCCCCCCCCCCCCCCCCL;
mask &= (e & 0xAAAAAAAAAAAAAAAAL & mask) == 0 ? 0x5555555555555555L
: 0xAAAAAAAAAAAAAAAAL;
final BigInteger N = BigInteger.valueOf(n);
long r = 2;
for (mask >>= 1; mask > 0; mask >>= 1) {
if (r <= Modular.MAX_SQRT_LONG) {
r *= r;
if (r >= n) r %= n;
} else {
final BigInteger R = BigInteger.valueOf(r);
r = R.multiply(R).mod(N).longValue();
}
if ((e & mask) != 0) {
r <<= 1;
if (r >= n) r -= n;
}
}
return r;
}
static class Montgomery2 extends Montgomery {
/** Compute 2^y mod N for N odd. */
long mod2(final long y) {
long r0 = R - N;
long r1 = r0 << 1;
if (r1 >= N) r1 -= N;
for(long mask = Long.highestOneBit(y); mask > 0; mask >>>= 1) {
if ((mask & y) == 0) {
r1 = product.m(r0, r1);
r0 = product.m(r0, r0);
} else {
r0 = product.m(r0, r1);
r1 = product.m(r1, r1);
}
}
return product.m(r0, 1);
}
}
static void modBenchmarks() {
final Timer t = new Timer(false);
t.tick("modBenchmarks()");
final long[][][] en = generateEN(10000, 10);
t.tick("generateEN");
for(int i = 0; i < en.length; i++) {
final long n = en[i][0][0];
for(int j = 1; j < en[i].length; j++) {
final long e = en[i][j][0];
final long answer = en[i][j][1];
final long s = Modular.mod(e, n);
if (s != answer)
assertEquals("e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
}
}
t.tick("Modular.mod");
final Montgomery2 m2 = new Montgomery2();
for(int i = 0; i < en.length; i++) {
final long n = en[i][0][0];
m2.set(n);
for(int j = 1; j < en[i].length; j++) {
final long e = en[i][j][0];
final long answer = en[i][j][1];
final long s = m2.mod(e);
if (s != answer)
assertEquals("e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
}
}
t.tick("montgomery.mod");
for(int i = 0; i < en.length; i++) {
final long n = en[i][0][0];
m2.set(n);
for(int j = 1; j < en[i].length; j++) {
final long e = en[i][j][0];
final long answer = en[i][j][1];
final long s = m2.mod2(e);
if (s != answer)
assertEquals("e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
}
}
t.tick("montgomery.mod2");
for(int i = 0; i < en.length; i++) {
final long n = en[i][0][0];
final BigInteger N = BigInteger.valueOf(n);
for(int j = 1; j < en[i].length; j++) {
final long e = en[i][j][0];
final long answer = en[i][j][1];
final long s = TWO.modPow(BigInteger.valueOf(e), N).longValue();
if (s != answer)
assertEquals("e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
}
}
t.tick("BigInteger.modPow(e, n)");
}
public static void main(String[] args) {
squareBenchmarks();
modBenchmarks();
}
}
| 10,379 | 29.801187 | 96 |
java
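As a reference for the helpers above (Modular.mod, modBigInteger, and Montgomery2.mod2), the recurrence they all exploit is the standard square-and-multiply identity; this note is an editorial addition, not part of the original test:

$$2^{2k} \equiv \left(2^{k}\right)^{2} \pmod{n}, \qquad 2^{2k+1} \equiv 2\left(2^{k}\right)^{2} \pmod{n}.$$

Scanning the bits of $e$ from the most significant bit downward, starting with $r = 2$ for the leading bit, squaring $r$ modulo $n$ at every step and doubling it when the current bit is 1, therefore yields $2^{e} \bmod n$; the extra branches in the code only guard against 64-bit overflow while squaring.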
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestLongLong.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.pi.math;
import java.math.BigInteger;
import java.util.Random;
public class TestLongLong extends junit.framework.TestCase {
static final Random RAN = new Random();
static final long MASK = (1L << (LongLong.SIZE >> 1)) - 1;
static long nextPositiveLong() {
return RAN.nextLong() & MASK;
}
static void verifyMultiplication(long a, long b) {
final LongLong ll = LongLong.multiplication(new LongLong(), a, b);
final BigInteger bi = BigInteger.valueOf(a).multiply(BigInteger.valueOf(b));
final String s = String.format("\na = %x\nb = %x\nll= " + ll + "\nbi= " + bi.toString(16) + "\n", a, b);
//System.out.println(s);
assertEquals(s, bi, ll.toBigInteger());
}
public void testMultiplication() {
for(int i = 0; i < 100; i++) {
final long a = nextPositiveLong();
final long b = nextPositiveLong();
verifyMultiplication(a, b);
}
final long max = Long.MAX_VALUE & MASK;
verifyMultiplication(max, max);
}
static void verifyRightShift(long a, long b) {
final LongLong ll = new LongLong().set(a, b);
final BigInteger bi = ll.toBigInteger();
for(int i = 0; i < LongLong.SIZE >> 1; i++) {
final long result = ll.shiftRight(i) & MASK;
final long expected = bi.shiftRight(i).longValue() & MASK;
final String s = String.format("\na = %x\nb = %x\nll= " + ll + "\nbi= " + bi.toString(16) + "\n", a, b);
assertEquals(s, expected, result);
}
final String s = String.format("\na = %x\nb = %x\nll= " + ll + "\nbi= " + bi.toString(16) + "\n", a, b);
//System.out.println(s);
assertEquals(s, bi, ll.toBigInteger());
}
public void testRightShift() {
for(int i = 0; i < 1000; i++) {
final long a = nextPositiveLong();
final long b = nextPositiveLong();
      verifyRightShift(a, b);
}
}
}
| 2,679 | 35.216216 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestSummation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.pi.math;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.examples.pi.Container;
import org.apache.hadoop.examples.pi.Util;
import org.apache.hadoop.examples.pi.Util.Timer;
import org.apache.hadoop.examples.pi.math.TestModular.Montgomery2;
public class TestSummation extends junit.framework.TestCase {
static final Random RANDOM = new Random();
static final BigInteger TWO = BigInteger.valueOf(2);
private static Summation2 newSummation(final long base, final long range, final long delta) {
final ArithmeticProgression N = new ArithmeticProgression('n', base+3, delta, base+3+range);
final ArithmeticProgression E = new ArithmeticProgression('e', base+range, -delta, base);
return new Summation2(N, E);
}
private static void runTestSubtract(Summation sigma, List<Summation> diff) {
// Util.out.println("diff=" + diff);
List<Container<Summation>> tmp = new ArrayList<Container<Summation>>(diff.size());
for(Summation s : diff)
tmp.add(s);
final List<Summation> a = sigma.remainingTerms(tmp);
// Util.out.println("a =" + a);
a.addAll(diff);
for(Summation s : a)
s.compute();
final List<Summation> combined = Util.combine(a);
// Util.out.println("combined=" + combined);
assertEquals(1, combined.size());
assertEquals(sigma, combined.get(0));
}
public void testSubtract() {
final Summation sigma = newSummation(3, 10000, 20);
final int size = 10;
final List<Summation> parts = Arrays.asList(sigma.partition(size));
Collections.sort(parts);
runTestSubtract(sigma, new ArrayList<Summation>());
runTestSubtract(sigma, parts);
for(int n = 1; n < size; n++) {
for(int j = 0; j < 10; j++) {
final List<Summation> diff = new ArrayList<Summation>(parts);
for(int i = 0; i < n; i++)
diff.remove(RANDOM.nextInt(diff.size()));
/// Collections.sort(diff);
runTestSubtract(sigma, diff);
}
}
}
static class Summation2 extends Summation {
Summation2(ArithmeticProgression N, ArithmeticProgression E) {
super(N, E);
}
final Montgomery2 m2 = new Montgomery2();
double compute_montgomery2() {
long e = E.value;
long n = N.value;
double s = 0;
for(; e > E.limit; e += E.delta) {
m2.set(n);
s = Modular.addMod(s, m2.mod2(e)/(double)n);
n += N.delta;
}
return s;
}
double compute_modBigInteger() {
long e = E.value;
long n = N.value;
double s = 0;
for(; e > E.limit; e += E.delta) {
s = Modular.addMod(s, TestModular.modBigInteger(e, n)/(double)n);
n += N.delta;
}
return s;
}
double compute_modPow() {
long e = E.value;
long n = N.value;
double s = 0;
for(; e > E.limit; e += E.delta) {
s = Modular.addMod(s, TWO.modPow(BigInteger.valueOf(e), BigInteger.valueOf(n)).doubleValue()/n);
n += N.delta;
}
return s;
}
}
private static void computeBenchmarks(final Summation2 sigma) {
final Timer t = new Timer(false);
t.tick("sigma=" + sigma);
final double value = sigma.compute();
t.tick("compute=" + value);
assertEquals(value, sigma.compute_modular());
t.tick("compute_modular");
assertEquals(value, sigma.compute_montgomery());
t.tick("compute_montgomery");
assertEquals(value, sigma.compute_montgomery2());
t.tick("compute_montgomery2");
assertEquals(value, sigma.compute_modBigInteger());
t.tick("compute_modBigInteger");
assertEquals(value, sigma.compute_modPow());
t.tick("compute_modPow");
}
/** Benchmarks */
public static void main(String[] args) {
final long delta = 1L << 4;
final long range = 1L << 20;
for(int i = 20; i < 40; i += 2)
computeBenchmarks(newSummation(1L << i, range, delta));
}
}
| 4,848 | 31.763514 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.io.IOException;
import org.apache.hadoop.examples.DBCountPageView;
import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.util.ToolRunner;
public class TestDBJob extends HadoopTestCase {
public TestDBJob() throws IOException {
super(LOCAL_MR, LOCAL_FS, 3, 1);
}
public void testRun() throws Exception {
DBCountPageView testDriver = new DBCountPageView();
ToolRunner.run(createJobConf(), testDriver, new String[0]);
}
}
| 1,328 | 32.225 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Sort.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples;
import java.io.IOException;
import java.net.URI;
import java.util.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.InputSampler;
import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* This is the trivial map/reduce program that does absolutely nothing
* other than use the framework to fragment and sort the input values.
*
* To run: bin/hadoop jar build/hadoop-examples.jar sort
* [-r <i>reduces</i>]
* [-inFormat <i>input format class</i>]
* [-outFormat <i>output format class</i>]
* [-outKey <i>output key class</i>]
* [-outValue <i>output value class</i>]
* [-totalOrder <i>pcnt</i> <i>num samples</i> <i>max splits</i>]
* <i>in-dir</i> <i>out-dir</i>
*/
public class Sort<K,V> extends Configured implements Tool {
public static final String REDUCES_PER_HOST =
"mapreduce.sort.reducesperhost";
private Job job = null;
static int printUsage() {
System.out.println("sort [-r <reduces>] " +
"[-inFormat <input format class>] " +
"[-outFormat <output format class>] " +
"[-outKey <output key class>] " +
"[-outValue <output value class>] " +
"[-totalOrder <pcnt> <num samples> <max splits>] " +
"<input> <output>");
ToolRunner.printGenericCommandUsage(System.out);
return 2;
}
/**
* The main driver for sort program.
* Invoke this method to submit the map/reduce job.
   * @throws IOException When there are communication problems with the
* job tracker.
*/
public int run(String[] args) throws Exception {
Configuration conf = getConf();
JobClient client = new JobClient(conf);
ClusterStatus cluster = client.getClusterStatus();
int num_reduces = (int) (cluster.getMaxReduceTasks() * 0.9);
String sort_reduces = conf.get(REDUCES_PER_HOST);
if (sort_reduces != null) {
num_reduces = cluster.getTaskTrackers() *
Integer.parseInt(sort_reduces);
}
Class<? extends InputFormat> inputFormatClass =
SequenceFileInputFormat.class;
Class<? extends OutputFormat> outputFormatClass =
SequenceFileOutputFormat.class;
Class<? extends WritableComparable> outputKeyClass = BytesWritable.class;
Class<? extends Writable> outputValueClass = BytesWritable.class;
List<String> otherArgs = new ArrayList<String>();
InputSampler.Sampler<K,V> sampler = null;
for(int i=0; i < args.length; ++i) {
try {
if ("-r".equals(args[i])) {
num_reduces = Integer.parseInt(args[++i]);
} else if ("-inFormat".equals(args[i])) {
inputFormatClass =
Class.forName(args[++i]).asSubclass(InputFormat.class);
} else if ("-outFormat".equals(args[i])) {
outputFormatClass =
Class.forName(args[++i]).asSubclass(OutputFormat.class);
} else if ("-outKey".equals(args[i])) {
outputKeyClass =
Class.forName(args[++i]).asSubclass(WritableComparable.class);
} else if ("-outValue".equals(args[i])) {
outputValueClass =
Class.forName(args[++i]).asSubclass(Writable.class);
} else if ("-totalOrder".equals(args[i])) {
double pcnt = Double.parseDouble(args[++i]);
int numSamples = Integer.parseInt(args[++i]);
int maxSplits = Integer.parseInt(args[++i]);
if (0 >= maxSplits) maxSplits = Integer.MAX_VALUE;
sampler =
new InputSampler.RandomSampler<K,V>(pcnt, numSamples, maxSplits);
} else {
otherArgs.add(args[i]);
}
} catch (NumberFormatException except) {
System.out.println("ERROR: Integer expected instead of " + args[i]);
return printUsage();
} catch (ArrayIndexOutOfBoundsException except) {
System.out.println("ERROR: Required parameter missing from " +
args[i-1]);
return printUsage(); // exits
}
}
// Set user-supplied (possibly default) job configs
job = Job.getInstance(conf);
job.setJobName("sorter");
job.setJarByClass(Sort.class);
job.setMapperClass(Mapper.class);
job.setReducerClass(Reducer.class);
job.setNumReduceTasks(num_reduces);
job.setInputFormatClass(inputFormatClass);
job.setOutputFormatClass(outputFormatClass);
job.setOutputKeyClass(outputKeyClass);
job.setOutputValueClass(outputValueClass);
// Make sure there are exactly 2 parameters left.
if (otherArgs.size() != 2) {
System.out.println("ERROR: Wrong number of parameters: " +
otherArgs.size() + " instead of 2.");
return printUsage();
}
FileInputFormat.setInputPaths(job, otherArgs.get(0));
FileOutputFormat.setOutputPath(job, new Path(otherArgs.get(1)));
if (sampler != null) {
System.out.println("Sampling input to effect total-order sort...");
job.setPartitionerClass(TotalOrderPartitioner.class);
Path inputDir = FileInputFormat.getInputPaths(job)[0];
FileSystem fs = inputDir.getFileSystem(conf);
inputDir = inputDir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
Path partitionFile = new Path(inputDir, "_sortPartitioning");
TotalOrderPartitioner.setPartitionFile(conf, partitionFile);
InputSampler.<K,V>writePartitionFile(job, sampler);
URI partitionUri = new URI(partitionFile.toString() +
"#" + "_sortPartitioning");
job.addCacheFile(partitionUri);
}
System.out.println("Running on " +
cluster.getTaskTrackers() +
" nodes to sort from " +
FileInputFormat.getInputPaths(job)[0] + " into " +
FileOutputFormat.getOutputPath(job) +
" with " + num_reduces + " reduces.");
Date startTime = new Date();
System.out.println("Job started: " + startTime);
int ret = job.waitForCompletion(true) ? 0 : 1;
Date end_time = new Date();
System.out.println("Job ended: " + end_time);
System.out.println("The job took " +
(end_time.getTime() - startTime.getTime()) /1000 + " seconds.");
return ret;
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new Sort(), args);
System.exit(res);
}
/**
* Get the last job that was run using this instance.
* @return the results of the last job that was run
*/
public Job getResult() {
return job;
}
}
| 8,194 | 39.171569 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/AggregateWordHistogram.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Map.Entry;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorBaseDescriptor;
import org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorJob;
/**
* This is an example Aggregated Hadoop Map/Reduce application. Computes the
* histogram of the words in the input texts.
*
* To run: bin/hadoop jar hadoop-*-examples.jar aggregatewordhist <i>in-dir</i>
* <i>out-dir</i> <i>numOfReducers</i> textinputformat
*
*/
public class AggregateWordHistogram {
public static class AggregateWordHistogramPlugin
extends ValueAggregatorBaseDescriptor {
/**
* Parse the given value, generate an aggregation-id/value pair per word.
* The ID is of type VALUE_HISTOGRAM, with WORD_HISTOGRAM as the real id.
* The value is WORD\t1.
*
* @return a list of the generated pairs.
*/
@Override
public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key,
Object val) {
String words[] = val.toString().split(" |\t");
ArrayList<Entry<Text, Text>> retv = new ArrayList<Entry<Text, Text>>();
for (int i = 0; i < words.length; i++) {
Text valCount = new Text(words[i] + "\t" + "1");
Entry<Text, Text> en = generateEntry(VALUE_HISTOGRAM, "WORD_HISTOGRAM",
valCount);
retv.add(en);
}
return retv;
}
}
/**
   * The main driver for the word histogram map/reduce program. Invoke this method to
* submit the map/reduce job.
*
* @throws IOException
   * When there are communication problems with the job tracker.
*/
@SuppressWarnings("unchecked")
public static void main(String[] args)
throws IOException, InterruptedException, ClassNotFoundException {
Job job = ValueAggregatorJob.createValueAggregatorJob(args
, new Class[] {AggregateWordHistogramPlugin.class});
job.setJarByClass(AggregateWordCount.class);
int ret = job.waitForCompletion(true) ? 0 : 1;
System.exit(ret);
}
}
| 3,016 | 35.349398 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMedian.java
|
package org.apache.hadoop.examples;
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Charsets;
public class WordMedian extends Configured implements Tool {
private double median = 0;
private final static IntWritable ONE = new IntWritable(1);
/**
* Maps words from line of text into a key-value pair; the length of the word
* as the key, and 1 as the value.
*/
public static class WordMedianMapper extends
Mapper<Object, Text, IntWritable, IntWritable> {
private IntWritable length = new IntWritable();
/**
* Emits a key-value pair for counting the word. Outputs are (IntWritable,
* IntWritable).
*
* @param value
* This will be a line of text coming in from our input file.
*/
public void map(Object key, Text value, Context context)
throws IOException, InterruptedException {
StringTokenizer itr = new StringTokenizer(value.toString());
while (itr.hasMoreTokens()) {
String string = itr.nextToken();
length.set(string.length());
context.write(length, ONE);
}
}
}
/**
* Performs integer summation of all the values for each key.
*/
public static class WordMedianReducer extends
Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
private IntWritable val = new IntWritable();
/**
* Sums all the individual values within the iterator and writes them to the
* same key.
*
* @param key
* This will be a length of a word that was read.
* @param values
* This will be an iterator of all the values associated with that
* key.
*/
public void reduce(IntWritable key, Iterable<IntWritable> values,
Context context) throws IOException, InterruptedException {
int sum = 0;
for (IntWritable value : values) {
sum += value.get();
}
val.set(sum);
context.write(key, val);
}
}
/**
* This is a standard program to read and find a median value based on a file
   * of word counts such as: 1 456, 2 132, 3 56..., where the first value on
   * each line is a word length and the second is the number of times that
   * words of that length appear.
*
* @param path
* The path to read the HDFS file from (part-r-00000...00001...etc).
* @param medianIndex1
* The first length value to look for.
* @param medianIndex2
* The second length value to look for (will be the same as the first
* if there are an even number of words total).
* @throws IOException
   * If the output file cannot be found or read.
* */
private double readAndFindMedian(String path, int medianIndex1,
int medianIndex2, Configuration conf) throws IOException {
FileSystem fs = FileSystem.get(conf);
Path file = new Path(path, "part-r-00000");
if (!fs.exists(file))
throw new IOException("Output not found!");
BufferedReader br = null;
try {
br = new BufferedReader(new InputStreamReader(fs.open(file), Charsets.UTF_8));
int num = 0;
String line;
while ((line = br.readLine()) != null) {
StringTokenizer st = new StringTokenizer(line);
// grab length
String currLen = st.nextToken();
// grab count
String lengthFreq = st.nextToken();
int prevNum = num;
num += Integer.parseInt(lengthFreq);
if (medianIndex2 >= prevNum && medianIndex1 <= num) {
System.out.println("The median is: " + currLen);
br.close();
return Double.parseDouble(currLen);
} else if (medianIndex2 >= prevNum && medianIndex1 < num) {
String nextCurrLen = st.nextToken();
double theMedian = (Integer.parseInt(currLen) + Integer
.parseInt(nextCurrLen)) / 2.0;
System.out.println("The median is: " + theMedian);
br.close();
return theMedian;
}
}
} finally {
if (br != null) {
br.close();
}
}
// error, no median found
return -1;
}
public static void main(String[] args) throws Exception {
ToolRunner.run(new Configuration(), new WordMedian(), args);
}
@Override
public int run(String[] args) throws Exception {
if (args.length != 2) {
System.err.println("Usage: wordmedian <in> <out>");
return 0;
}
setConf(new Configuration());
Configuration conf = getConf();
Job job = Job.getInstance(conf, "word median");
job.setJarByClass(WordMedian.class);
job.setMapperClass(WordMedianMapper.class);
job.setCombinerClass(WordMedianReducer.class);
job.setReducerClass(WordMedianReducer.class);
job.setOutputKeyClass(IntWritable.class);
job.setOutputValueClass(IntWritable.class);
FileInputFormat.addInputPath(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
boolean result = job.waitForCompletion(true);
// Wait for JOB 1 -- get middle value to check for Median
long totalWords = job.getCounters()
.getGroup(TaskCounter.class.getCanonicalName())
.findCounter("MAP_OUTPUT_RECORDS", "Map output records").getValue();
int medianIndex1 = (int) Math.ceil((totalWords / 2.0));
int medianIndex2 = (int) Math.floor((totalWords / 2.0));
median = readAndFindMedian(args[1], medianIndex1, medianIndex2, conf);
return (result ? 0 : 1);
}
public double getMedian() {
return median;
}
}
| 7,055 | 32.283019 | 84 |
java
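To make the cumulative walk in readAndFindMedian concrete, here is a self-contained sketch over the hypothetical counts quoted in its javadoc (1 456, 2 132, 3 56); the class name and numbers are illustrative only.

import java.util.TreeMap;

public class MedianWalkSketch {
  public static void main(String[] args) {
    // word length -> number of words of that length (made-up counts)
    TreeMap<Integer, Integer> counts = new TreeMap<Integer, Integer>();
    counts.put(1, 456);
    counts.put(2, 132);
    counts.put(3, 56);
    long totalWords = 456 + 132 + 56;                      // 644 words
    int medianIndex1 = (int) Math.ceil(totalWords / 2.0);  // 322
    int medianIndex2 = (int) Math.floor(totalWords / 2.0); // 322
    int num = 0;
    for (Integer len : counts.navigableKeySet()) {
      int prevNum = num;
      num += counts.get(len);
      // The median lives in the first bucket whose cumulative count reaches
      // the middle index; here 456 >= 322, so the median length is 1.
      if (medianIndex2 >= prevNum && medianIndex1 <= num) {
        System.out.println("median length = " + len);
        break;
      }
    }
  }
}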
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/AggregateWordCount.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples;
import java.io.IOException;
import java.util.ArrayList;
import java.util.StringTokenizer;
import java.util.Map.Entry;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorBaseDescriptor;
import org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorJob;
/**
* This is an example Aggregated Hadoop Map/Reduce application. It reads the
* text input files, breaks each line into words and counts them. The output is
* a locally sorted list of words and the count of how often they occurred.
*
* To run: bin/hadoop jar hadoop-*-examples.jar aggregatewordcount
* <i>in-dir</i> <i>out-dir</i> <i>numOfReducers</i> textinputformat
*
*/
public class AggregateWordCount {
public static class WordCountPlugInClass extends
ValueAggregatorBaseDescriptor {
@Override
public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key,
Object val) {
String countType = LONG_VALUE_SUM;
ArrayList<Entry<Text, Text>> retv = new ArrayList<Entry<Text, Text>>();
String line = val.toString();
StringTokenizer itr = new StringTokenizer(line);
while (itr.hasMoreTokens()) {
Entry<Text, Text> e = generateEntry(countType, itr.nextToken(), ONE);
if (e != null) {
retv.add(e);
}
}
return retv;
}
}
/**
* The main driver for word count map/reduce program. Invoke this method to
* submit the map/reduce job.
*
* @throws IOException
   *           When there are communication problems with the job tracker.
*/
@SuppressWarnings("unchecked")
public static void main(String[] args)
throws IOException, InterruptedException, ClassNotFoundException {
Job job = ValueAggregatorJob.createValueAggregatorJob(args
, new Class[] {WordCountPlugInClass.class});
job.setJarByClass(AggregateWordCount.class);
int ret = job.waitForCompletion(true) ? 0 : 1;
System.exit(ret);
}
}
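// Illustrative sketch, not part of the original example: shows the
// (aggregator-type:word, count) pairs that WordCountPlugInClass emits for a
// hypothetical input line. It assumes the imports already present in this
// file and prints pairs like "LongValueSum:hello -> 1".
class AggregateWordCountSketch {
  public static void main(String[] args) {
    AggregateWordCount.WordCountPlugInClass descriptor =
        new AggregateWordCount.WordCountPlugInClass();
    // One entry is generated per token; the aggregate framework later sums
    // the "1" values per key.
    for (Entry<Text, Text> e
        : descriptor.generateKeyValPairs(null, new Text("hello hello world"))) {
      System.out.println(e.getKey() + " -> " + e.getValue());
    }
  }
}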
| 2,897 | 35.683544 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* This program uses map/reduce to just run a distributed job where there is
 * no interaction between the tasks and each task writes a large unsorted
* random binary sequence file of BytesWritable.
* In order for this program to generate data for terasort with 10-byte keys
* and 90-byte values, have the following config:
* <pre>{@code
* <?xml version="1.0"?>
* <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
* <configuration>
* <property>
* <name>mapreduce.randomwriter.minkey</name>
* <value>10</value>
* </property>
* <property>
* <name>mapreduce.randomwriter.maxkey</name>
* <value>10</value>
* </property>
* <property>
* <name>mapreduce.randomwriter.minvalue</name>
* <value>90</value>
* </property>
* <property>
* <name>mapreduce.randomwriter.maxvalue</name>
* <value>90</value>
* </property>
* <property>
* <name>mapreduce.randomwriter.totalbytes</name>
* <value>1099511627776</value>
* </property>
* </configuration>}</pre>
* Equivalently, {@link RandomWriter} also supports all the above options
* and ones supported by {@link GenericOptionsParser} via the command-line.
*/
public class RandomWriter extends Configured implements Tool {
public static final String TOTAL_BYTES = "mapreduce.randomwriter.totalbytes";
public static final String BYTES_PER_MAP =
"mapreduce.randomwriter.bytespermap";
public static final String MAPS_PER_HOST =
"mapreduce.randomwriter.mapsperhost";
public static final String MAX_VALUE = "mapreduce.randomwriter.maxvalue";
public static final String MIN_VALUE = "mapreduce.randomwriter.minvalue";
public static final String MIN_KEY = "mapreduce.randomwriter.minkey";
public static final String MAX_KEY = "mapreduce.randomwriter.maxkey";
/**
* User counters
*/
static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
/**
* A custom input format that creates virtual inputs of a single string
* for each map.
*/
static class RandomInputFormat extends InputFormat<Text, Text> {
/**
* Generate the requested number of file splits, with the filename
* set to the filename of the output file.
*/
public List<InputSplit> getSplits(JobContext job) throws IOException {
List<InputSplit> result = new ArrayList<InputSplit>();
Path outDir = FileOutputFormat.getOutputPath(job);
int numSplits =
job.getConfiguration().getInt(MRJobConfig.NUM_MAPS, 1);
for(int i=0; i < numSplits; ++i) {
result.add(new FileSplit(new Path(outDir, "dummy-split-" + i), 0, 1,
(String[])null));
}
return result;
}
/**
* Return a single record (filename, "") where the filename is taken from
* the file split.
*/
static class RandomRecordReader extends RecordReader<Text, Text> {
Path name;
Text key = null;
Text value = new Text();
public RandomRecordReader(Path p) {
name = p;
}
public void initialize(InputSplit split,
TaskAttemptContext context)
throws IOException, InterruptedException {
}
public boolean nextKeyValue() {
if (name != null) {
key = new Text();
key.set(name.getName());
name = null;
return true;
}
return false;
}
public Text getCurrentKey() {
return key;
}
public Text getCurrentValue() {
return value;
}
public void close() {}
public float getProgress() {
return 0.0f;
}
}
public RecordReader<Text, Text> createRecordReader(InputSplit split,
TaskAttemptContext context) throws IOException, InterruptedException {
return new RandomRecordReader(((FileSplit) split).getPath());
}
}
static class RandomMapper extends Mapper<WritableComparable, Writable,
BytesWritable, BytesWritable> {
private long numBytesToWrite;
private int minKeySize;
private int keySizeRange;
private int minValueSize;
private int valueSizeRange;
private Random random = new Random();
private BytesWritable randomKey = new BytesWritable();
private BytesWritable randomValue = new BytesWritable();
private void randomizeBytes(byte[] data, int offset, int length) {
for(int i=offset + length - 1; i >= offset; --i) {
data[i] = (byte) random.nextInt(256);
}
}
/**
* Given an output filename, write a bunch of random records to it.
*/
public void map(WritableComparable key,
Writable value,
Context context) throws IOException,InterruptedException {
int itemCount = 0;
while (numBytesToWrite > 0) {
int keyLength = minKeySize +
(keySizeRange != 0 ? random.nextInt(keySizeRange) : 0);
randomKey.setSize(keyLength);
randomizeBytes(randomKey.getBytes(), 0, randomKey.getLength());
int valueLength = minValueSize +
(valueSizeRange != 0 ? random.nextInt(valueSizeRange) : 0);
randomValue.setSize(valueLength);
randomizeBytes(randomValue.getBytes(), 0, randomValue.getLength());
context.write(randomKey, randomValue);
numBytesToWrite -= keyLength + valueLength;
context.getCounter(Counters.BYTES_WRITTEN).increment(keyLength + valueLength);
context.getCounter(Counters.RECORDS_WRITTEN).increment(1);
if (++itemCount % 200 == 0) {
context.setStatus("wrote record " + itemCount + ". " +
numBytesToWrite + " bytes left.");
}
}
context.setStatus("done with " + itemCount + " records.");
}
/**
     * Save the values from the configuration that we need to write
     * the data.
*/
@Override
public void setup(Context context) {
Configuration conf = context.getConfiguration();
numBytesToWrite = conf.getLong(BYTES_PER_MAP,
1*1024*1024*1024);
minKeySize = conf.getInt(MIN_KEY, 10);
keySizeRange =
conf.getInt(MAX_KEY, 1000) - minKeySize;
minValueSize = conf.getInt(MIN_VALUE, 0);
valueSizeRange =
conf.getInt(MAX_VALUE, 20000) - minValueSize;
}
}
/**
* This is the main routine for launching a distributed random write job.
* It runs 10 maps/node and each node writes 1 gig of data to a DFS file.
* The reduce doesn't do anything.
*
* @throws IOException
*/
public int run(String[] args) throws Exception {
if (args.length == 0) {
System.out.println("Usage: writer <out-dir>");
ToolRunner.printGenericCommandUsage(System.out);
return 2;
}
Path outDir = new Path(args[0]);
Configuration conf = getConf();
JobClient client = new JobClient(conf);
ClusterStatus cluster = client.getClusterStatus();
int numMapsPerHost = conf.getInt(MAPS_PER_HOST, 10);
long numBytesToWritePerMap = conf.getLong(BYTES_PER_MAP,
1*1024*1024*1024);
if (numBytesToWritePerMap == 0) {
System.err.println("Cannot have" + BYTES_PER_MAP + " set to 0");
return -2;
}
long totalBytesToWrite = conf.getLong(TOTAL_BYTES,
numMapsPerHost*numBytesToWritePerMap*cluster.getTaskTrackers());
int numMaps = (int) (totalBytesToWrite / numBytesToWritePerMap);
if (numMaps == 0 && totalBytesToWrite > 0) {
numMaps = 1;
conf.setLong(BYTES_PER_MAP, totalBytesToWrite);
}
conf.setInt(MRJobConfig.NUM_MAPS, numMaps);
Job job = Job.getInstance(conf);
job.setJarByClass(RandomWriter.class);
job.setJobName("random-writer");
FileOutputFormat.setOutputPath(job, outDir);
job.setOutputKeyClass(BytesWritable.class);
job.setOutputValueClass(BytesWritable.class);
job.setInputFormatClass(RandomInputFormat.class);
job.setMapperClass(RandomMapper.class);
job.setReducerClass(Reducer.class);
job.setOutputFormatClass(SequenceFileOutputFormat.class);
System.out.println("Running " + numMaps + " maps.");
// reducer NONE
job.setNumReduceTasks(0);
Date startTime = new Date();
System.out.println("Job started: " + startTime);
int ret = job.waitForCompletion(true) ? 0 : 1;
Date endTime = new Date();
System.out.println("Job ended: " + endTime);
System.out.println("The job took " +
(endTime.getTime() - startTime.getTime()) /1000 +
" seconds.");
return ret;
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new RandomWriter(), args);
System.exit(res);
}
}
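// Illustrative sketch, not part of the original example: the map-count
// arithmetic used in run(), worked with hypothetical numbers. With the
// defaults (1 GB per map, 10 maps per host) a 4-tracker cluster is asked
// for 40 maps writing 40 GiB in total.
class RandomWriterSizingSketch {
  public static void main(String[] args) {
    long bytesPerMap = 1L * 1024 * 1024 * 1024; // BYTES_PER_MAP default
    int mapsPerHost = 10;                       // MAPS_PER_HOST default
    int taskTrackers = 4;                       // hypothetical cluster size
    long totalBytesToWrite = mapsPerHost * bytesPerMap * taskTrackers;
    int numMaps = (int) (totalBytesToWrite / bytesPerMap);
    System.out.println(numMaps + " maps, " + totalBytesToWrite + " bytes");
  }
}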
| 10,584 | 34.520134 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Join.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.join.CompositeInputFormat;
import org.apache.hadoop.mapreduce.lib.join.TupleWritable;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* Given a set of sorted datasets keyed with the same class and yielding
* equal partitions, it is possible to effect a join of those datasets
* prior to the map. The example facilitates the same.
*
* To run: bin/hadoop jar build/hadoop-examples.jar join
* [-r <i>reduces</i>]
* [-inFormat <i>input format class</i>]
* [-outFormat <i>output format class</i>]
* [-outKey <i>output key class</i>]
* [-outValue <i>output value class</i>]
* [-joinOp <inner|outer|override>]
* [<i>in-dir</i>]* <i>in-dir</i> <i>out-dir</i>
*/
public class Join extends Configured implements Tool {
public static final String REDUCES_PER_HOST = "mapreduce.join.reduces_per_host";
static int printUsage() {
System.out.println("join [-r <reduces>] " +
"[-inFormat <input format class>] " +
"[-outFormat <output format class>] " +
"[-outKey <output key class>] " +
"[-outValue <output value class>] " +
"[-joinOp <inner|outer|override>] " +
"[input]* <input> <output>");
ToolRunner.printGenericCommandUsage(System.out);
return 2;
}
/**
   * The main driver for the join program.
   * Invoke this method to submit the map/reduce job.
   * @throws IOException When there are communication problems with the
   *                     job tracker.
*/
@SuppressWarnings("unchecked")
public int run(String[] args) throws Exception {
Configuration conf = getConf();
JobClient client = new JobClient(conf);
ClusterStatus cluster = client.getClusterStatus();
int num_reduces = (int) (cluster.getMaxReduceTasks() * 0.9);
String join_reduces = conf.get(REDUCES_PER_HOST);
if (join_reduces != null) {
num_reduces = cluster.getTaskTrackers() *
Integer.parseInt(join_reduces);
}
Job job = Job.getInstance(conf);
job.setJobName("join");
    job.setJarByClass(Join.class);
job.setMapperClass(Mapper.class);
job.setReducerClass(Reducer.class);
Class<? extends InputFormat> inputFormatClass =
SequenceFileInputFormat.class;
Class<? extends OutputFormat> outputFormatClass =
SequenceFileOutputFormat.class;
Class<? extends WritableComparable> outputKeyClass = BytesWritable.class;
Class<? extends Writable> outputValueClass = TupleWritable.class;
String op = "inner";
List<String> otherArgs = new ArrayList<String>();
for(int i=0; i < args.length; ++i) {
try {
if ("-r".equals(args[i])) {
num_reduces = Integer.parseInt(args[++i]);
} else if ("-inFormat".equals(args[i])) {
inputFormatClass =
Class.forName(args[++i]).asSubclass(InputFormat.class);
} else if ("-outFormat".equals(args[i])) {
outputFormatClass =
Class.forName(args[++i]).asSubclass(OutputFormat.class);
} else if ("-outKey".equals(args[i])) {
outputKeyClass =
Class.forName(args[++i]).asSubclass(WritableComparable.class);
} else if ("-outValue".equals(args[i])) {
outputValueClass =
Class.forName(args[++i]).asSubclass(Writable.class);
} else if ("-joinOp".equals(args[i])) {
op = args[++i];
} else {
otherArgs.add(args[i]);
}
} catch (NumberFormatException except) {
System.out.println("ERROR: Integer expected instead of " + args[i]);
return printUsage();
} catch (ArrayIndexOutOfBoundsException except) {
System.out.println("ERROR: Required parameter missing from " +
args[i-1]);
return printUsage(); // exits
}
}
// Set user-supplied (possibly default) job configs
job.setNumReduceTasks(num_reduces);
if (otherArgs.size() < 2) {
System.out.println("ERROR: Wrong number of parameters: ");
return printUsage();
}
FileOutputFormat.setOutputPath(job,
new Path(otherArgs.remove(otherArgs.size() - 1)));
List<Path> plist = new ArrayList<Path>(otherArgs.size());
for (String s : otherArgs) {
plist.add(new Path(s));
}
job.setInputFormatClass(CompositeInputFormat.class);
job.getConfiguration().set(CompositeInputFormat.JOIN_EXPR,
CompositeInputFormat.compose(op, inputFormatClass,
plist.toArray(new Path[0])));
job.setOutputFormatClass(outputFormatClass);
job.setOutputKeyClass(outputKeyClass);
job.setOutputValueClass(outputValueClass);
Date startTime = new Date();
System.out.println("Job started: " + startTime);
int ret = job.waitForCompletion(true) ? 0 : 1 ;
Date end_time = new Date();
System.out.println("Job ended: " + end_time);
System.out.println("The job took " +
(end_time.getTime() - startTime.getTime()) /1000 + " seconds.");
return ret;
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new Join(), args);
System.exit(res);
}
}
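// Illustrative sketch, not part of the original example: the join
// expression that run() stores under CompositeInputFormat.JOIN_EXPR, built
// here for two hypothetical input directories. It assumes the imports
// already present in this file; the printed expression looks roughly like
// inner(tbl(...SequenceFileInputFormat,"/data/a"),tbl(...,"/data/b")).
class JoinExprSketch {
  public static void main(String[] args) {
    String expr = CompositeInputFormat.compose("inner",
        SequenceFileInputFormat.class,
        new Path("/data/a"), new Path("/data/b"));
    System.out.println(expr);
  }
}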
| 7,041 | 38.561798 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples;
import java.io.IOException;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* A map/reduce program that estimates the value of Pi
* using a quasi-Monte Carlo (qMC) method.
* Arbitrary integrals can be approximated numerically by qMC methods.
* In this example,
* we use a qMC method to approximate the integral $I = \int_S f(x) dx$,
* where $S=[0,1)^2$ is a unit square,
* $x=(x_1,x_2)$ is a 2-dimensional point,
* and $f$ is a function describing the inscribed circle of the square $S$,
* $f(x)=1$ if $(2x_1-1)^2+(2x_2-1)^2 <= 1$ and $f(x)=0$, otherwise.
* It is easy to see that Pi is equal to $4I$.
* So an approximation of Pi is obtained once $I$ is evaluated numerically.
*
* There are better methods for computing Pi.
* We emphasize numerical approximation of arbitrary integrals in this example.
* For computing many digits of Pi, consider using bbp.
*
* The implementation is discussed below.
*
* Mapper:
* Generate points in a unit square
* and then count points inside/outside of the inscribed circle of the square.
*
* Reducer:
* Accumulate points inside/outside results from the mappers.
*
* Let numTotal = numInside + numOutside.
* The fraction numInside/numTotal is a rational approximation of
* the value (Area of the circle)/(Area of the square) = $I$,
* where the area of the inscribed circle is Pi/4
* and the area of unit square is 1.
* Finally, the estimated value of Pi is 4(numInside/numTotal).
*/
public class QuasiMonteCarlo extends Configured implements Tool {
static final String DESCRIPTION
= "A map/reduce program that estimates Pi using a quasi-Monte Carlo method.";
/** tmp directory for input/output */
static private final String TMP_DIR_PREFIX = QuasiMonteCarlo.class.getSimpleName();
/** 2-dimensional Halton sequence {H(i)},
* where H(i) is a 2-dimensional point and i >= 1 is the index.
* Halton sequence is used to generate sample points for Pi estimation.
*/
private static class HaltonSequence {
/** Bases */
static final int[] P = {2, 3};
/** Maximum number of digits allowed */
static final int[] K = {63, 40};
private long index;
private double[] x;
private double[][] q;
private int[][] d;
/** Initialize to H(startindex),
* so the sequence begins with H(startindex+1).
*/
HaltonSequence(long startindex) {
index = startindex;
x = new double[K.length];
q = new double[K.length][];
d = new int[K.length][];
for(int i = 0; i < K.length; i++) {
q[i] = new double[K[i]];
d[i] = new int[K[i]];
}
for(int i = 0; i < K.length; i++) {
long k = index;
x[i] = 0;
for(int j = 0; j < K[i]; j++) {
q[i][j] = (j == 0? 1.0: q[i][j-1])/P[i];
d[i][j] = (int)(k % P[i]);
k = (k - d[i][j])/P[i];
x[i] += d[i][j] * q[i][j];
}
}
}
/** Compute next point.
* Assume the current point is H(index).
* Compute H(index+1).
*
* @return a 2-dimensional point with coordinates in [0,1)^2
*/
double[] nextPoint() {
index++;
for(int i = 0; i < K.length; i++) {
for(int j = 0; j < K[i]; j++) {
d[i][j]++;
x[i] += q[i][j];
if (d[i][j] < P[i]) {
break;
}
d[i][j] = 0;
x[i] -= (j == 0? 1.0: q[i][j-1]);
}
}
return x;
}
}
/**
* Mapper class for Pi estimation.
* Generate points in a unit square
* and then count points inside/outside of the inscribed circle of the square.
*/
public static class QmcMapper extends
Mapper<LongWritable, LongWritable, BooleanWritable, LongWritable> {
/** Map method.
* @param offset samples starting from the (offset+1)th sample.
* @param size the number of samples for this map
     * @param context output {true->numInside, false->numOutside}
*/
public void map(LongWritable offset,
LongWritable size,
Context context)
throws IOException, InterruptedException {
final HaltonSequence haltonsequence = new HaltonSequence(offset.get());
long numInside = 0L;
long numOutside = 0L;
for(long i = 0; i < size.get(); ) {
//generate points in a unit square
final double[] point = haltonsequence.nextPoint();
//count points inside/outside of the inscribed circle of the square
final double x = point[0] - 0.5;
final double y = point[1] - 0.5;
if (x*x + y*y > 0.25) {
numOutside++;
} else {
numInside++;
}
//report status
i++;
if (i % 1000 == 0) {
context.setStatus("Generated " + i + " samples.");
}
}
//output map results
context.write(new BooleanWritable(true), new LongWritable(numInside));
context.write(new BooleanWritable(false), new LongWritable(numOutside));
}
}
/**
* Reducer class for Pi estimation.
* Accumulate points inside/outside results from the mappers.
*/
public static class QmcReducer extends
Reducer<BooleanWritable, LongWritable, WritableComparable<?>, Writable> {
private long numInside = 0;
private long numOutside = 0;
/**
* Accumulate number of points inside/outside results from the mappers.
     * @param isInside whether the points are inside the inscribed circle
* @param values An iterator to a list of point counts
* @param context dummy, not used here.
*/
public void reduce(BooleanWritable isInside,
Iterable<LongWritable> values, Context context)
throws IOException, InterruptedException {
if (isInside.get()) {
for (LongWritable val : values) {
numInside += val.get();
}
} else {
for (LongWritable val : values) {
numOutside += val.get();
}
}
}
/**
* Reduce task done, write output to a file.
*/
@Override
public void cleanup(Context context) throws IOException {
//write output to a file
Configuration conf = context.getConfiguration();
Path outDir = new Path(conf.get(FileOutputFormat.OUTDIR));
Path outFile = new Path(outDir, "reduce-out");
FileSystem fileSys = FileSystem.get(conf);
SequenceFile.Writer writer = SequenceFile.createWriter(fileSys, conf,
outFile, LongWritable.class, LongWritable.class,
CompressionType.NONE);
writer.append(new LongWritable(numInside), new LongWritable(numOutside));
writer.close();
}
}
/**
* Run a map/reduce job for estimating Pi.
*
* @return the estimated value of Pi
*/
public static BigDecimal estimatePi(int numMaps, long numPoints,
Path tmpDir, Configuration conf
) throws IOException, ClassNotFoundException, InterruptedException {
Job job = Job.getInstance(conf);
//setup job conf
job.setJobName(QuasiMonteCarlo.class.getSimpleName());
job.setJarByClass(QuasiMonteCarlo.class);
job.setInputFormatClass(SequenceFileInputFormat.class);
job.setOutputKeyClass(BooleanWritable.class);
job.setOutputValueClass(LongWritable.class);
job.setOutputFormatClass(SequenceFileOutputFormat.class);
job.setMapperClass(QmcMapper.class);
job.setReducerClass(QmcReducer.class);
job.setNumReduceTasks(1);
// turn off speculative execution, because DFS doesn't handle
// multiple writers to the same file.
job.setSpeculativeExecution(false);
//setup input/output directories
final Path inDir = new Path(tmpDir, "in");
final Path outDir = new Path(tmpDir, "out");
FileInputFormat.setInputPaths(job, inDir);
FileOutputFormat.setOutputPath(job, outDir);
final FileSystem fs = FileSystem.get(conf);
if (fs.exists(tmpDir)) {
throw new IOException("Tmp directory " + fs.makeQualified(tmpDir)
+ " already exists. Please remove it first.");
}
if (!fs.mkdirs(inDir)) {
throw new IOException("Cannot create input directory " + inDir);
}
try {
//generate an input file for each map task
for(int i=0; i < numMaps; ++i) {
final Path file = new Path(inDir, "part"+i);
final LongWritable offset = new LongWritable(i * numPoints);
final LongWritable size = new LongWritable(numPoints);
final SequenceFile.Writer writer = SequenceFile.createWriter(
fs, conf, file,
LongWritable.class, LongWritable.class, CompressionType.NONE);
try {
writer.append(offset, size);
} finally {
writer.close();
}
System.out.println("Wrote input for Map #"+i);
}
//start a map/reduce job
System.out.println("Starting Job");
final long startTime = System.currentTimeMillis();
job.waitForCompletion(true);
if (!job.isSuccessful()) {
System.out.println("Job " + job.getJobID() + " failed!");
System.exit(1);
}
final double duration = (System.currentTimeMillis() - startTime)/1000.0;
System.out.println("Job Finished in " + duration + " seconds");
//read outputs
Path inFile = new Path(outDir, "reduce-out");
LongWritable numInside = new LongWritable();
LongWritable numOutside = new LongWritable();
SequenceFile.Reader reader = new SequenceFile.Reader(fs, inFile, conf);
try {
reader.next(numInside, numOutside);
} finally {
reader.close();
}
//compute estimated value
final BigDecimal numTotal
= BigDecimal.valueOf(numMaps).multiply(BigDecimal.valueOf(numPoints));
return BigDecimal.valueOf(4).setScale(20)
.multiply(BigDecimal.valueOf(numInside.get()))
.divide(numTotal, RoundingMode.HALF_UP);
} finally {
fs.delete(tmpDir, true);
}
}
/**
* Parse arguments and then runs a map/reduce job.
* Print output in standard out.
*
* @return a non-zero if there is an error. Otherwise, return 0.
*/
public int run(String[] args) throws Exception {
if (args.length != 2) {
System.err.println("Usage: "+getClass().getName()+" <nMaps> <nSamples>");
ToolRunner.printGenericCommandUsage(System.err);
return 2;
}
final int nMaps = Integer.parseInt(args[0]);
final long nSamples = Long.parseLong(args[1]);
long now = System.currentTimeMillis();
int rand = new Random().nextInt(Integer.MAX_VALUE);
final Path tmpDir = new Path(TMP_DIR_PREFIX + "_" + now + "_" + rand);
System.out.println("Number of Maps = " + nMaps);
System.out.println("Samples per Map = " + nSamples);
System.out.println("Estimated value of Pi is "
+ estimatePi(nMaps, nSamples, tmpDir, getConf()));
return 0;
}
/**
* main method for running it as a stand alone command.
*/
public static void main(String[] argv) throws Exception {
System.exit(ToolRunner.run(null, new QuasiMonteCarlo(), argv));
}
}
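// Illustrative sketch, not part of the original example: the same
// inside/outside counting and 4*(numInside/numTotal) estimate that the
// mapper and reducer perform, collapsed into a single JVM. java.util.Random
// (already imported in this file) stands in for the private HaltonSequence,
// so convergence is slower than with the quasi-random points.
class QmcLocalSketch {
  public static void main(String[] args) {
    Random r = new Random(0);
    long numInside = 0;
    long numTotal = 1000000;
    for (long i = 0; i < numTotal; i++) {
      // sample a point in the unit square, centered at the origin
      double x = r.nextDouble() - 0.5;
      double y = r.nextDouble() - 0.5;
      if (x * x + y * y <= 0.25) { // inside the inscribed circle
        numInside++;
      }
    }
    System.out.println("Estimated value of Pi is " + 4.0 * numInside / numTotal);
  }
}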
| 12,776 | 33.532432 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/SecondarySort.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.util.GenericOptionsParser;
/**
* This is an example Hadoop Map/Reduce application.
 * It reads the text input files that must contain two integers per line.
* The output is sorted by the first and second number and grouped on the
* first number.
*
* To run: bin/hadoop jar build/hadoop-examples.jar secondarysort
* <i>in-dir</i> <i>out-dir</i>
*/
public class SecondarySort {
/**
* Define a pair of integers that are writable.
* They are serialized in a byte comparable format.
*/
public static class IntPair
implements WritableComparable<IntPair> {
private int first = 0;
private int second = 0;
/**
* Set the left and right values.
*/
public void set(int left, int right) {
first = left;
second = right;
}
public int getFirst() {
return first;
}
public int getSecond() {
return second;
}
/**
* Read the two integers.
* Encoded as: MIN_VALUE -> 0, 0 -> -MIN_VALUE, MAX_VALUE-> -1
*/
@Override
public void readFields(DataInput in) throws IOException {
first = in.readInt() + Integer.MIN_VALUE;
second = in.readInt() + Integer.MIN_VALUE;
}
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(first - Integer.MIN_VALUE);
out.writeInt(second - Integer.MIN_VALUE);
}
@Override
public int hashCode() {
return first * 157 + second;
}
@Override
public boolean equals(Object right) {
if (right instanceof IntPair) {
IntPair r = (IntPair) right;
return r.first == first && r.second == second;
} else {
return false;
}
}
/** A Comparator that compares serialized IntPair. */
public static class Comparator extends WritableComparator {
public Comparator() {
super(IntPair.class);
}
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
return compareBytes(b1, s1, l1, b2, s2, l2);
}
}
static { // register this comparator
WritableComparator.define(IntPair.class, new Comparator());
}
@Override
public int compareTo(IntPair o) {
if (first != o.first) {
return first < o.first ? -1 : 1;
} else if (second != o.second) {
return second < o.second ? -1 : 1;
} else {
return 0;
}
}
}
/**
* Partition based on the first part of the pair.
*/
public static class FirstPartitioner extends Partitioner<IntPair,IntWritable>{
@Override
public int getPartition(IntPair key, IntWritable value,
int numPartitions) {
      // mask off the sign bit so the partition is non-negative even when
      // key.getFirst() * 127 overflows to Integer.MIN_VALUE
      return (key.getFirst() * 127 & Integer.MAX_VALUE) % numPartitions;
}
}
/**
* Compare only the first part of the pair, so that reduce is called once
* for each value of the first part.
*/
public static class FirstGroupingComparator
implements RawComparator<IntPair> {
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
return WritableComparator.compareBytes(b1, s1, Integer.SIZE/8,
b2, s2, Integer.SIZE/8);
}
@Override
public int compare(IntPair o1, IntPair o2) {
int l = o1.getFirst();
int r = o2.getFirst();
return l == r ? 0 : (l < r ? -1 : 1);
}
}
/**
* Read two integers from each line and generate a key, value pair
* as ((left, right), right).
*/
public static class MapClass
extends Mapper<LongWritable, Text, IntPair, IntWritable> {
private final IntPair key = new IntPair();
private final IntWritable value = new IntWritable();
@Override
public void map(LongWritable inKey, Text inValue,
Context context) throws IOException, InterruptedException {
StringTokenizer itr = new StringTokenizer(inValue.toString());
int left = 0;
int right = 0;
if (itr.hasMoreTokens()) {
left = Integer.parseInt(itr.nextToken());
if (itr.hasMoreTokens()) {
right = Integer.parseInt(itr.nextToken());
}
key.set(left, right);
value.set(right);
context.write(key, value);
}
}
}
/**
   * A reducer class that writes a separator line and then emits every value
   * grouped under the same first element of the key.
*/
public static class Reduce
extends Reducer<IntPair, IntWritable, Text, IntWritable> {
private static final Text SEPARATOR =
new Text("------------------------------------------------");
private final Text first = new Text();
@Override
public void reduce(IntPair key, Iterable<IntWritable> values,
Context context
) throws IOException, InterruptedException {
context.write(SEPARATOR, null);
first.set(Integer.toString(key.getFirst()));
for(IntWritable value: values) {
context.write(first, value);
}
}
}
public static void main(String[] args) throws Exception {
Configuration conf = new Configuration();
String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
if (otherArgs.length != 2) {
System.err.println("Usage: secondarysort <in> <out>");
System.exit(2);
}
Job job = Job.getInstance(conf, "secondary sort");
job.setJarByClass(SecondarySort.class);
job.setMapperClass(MapClass.class);
job.setReducerClass(Reduce.class);
// group and partition by the first int in the pair
job.setPartitionerClass(FirstPartitioner.class);
job.setGroupingComparatorClass(FirstGroupingComparator.class);
// the map output is IntPair, IntWritable
job.setMapOutputKeyClass(IntPair.class);
job.setMapOutputValueClass(IntWritable.class);
// the reduce output is Text, IntWritable
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
System.exit(job.waitForCompletion(true) ? 0 : 1);
}
}
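// Illustrative sketch, not part of the original example: two pairs that
// share a first element land in the same partition and compare as equal for
// grouping, so they reach the same reduce() call, while compareTo() still
// orders them by the second element.
class SecondarySortGroupingSketch {
  public static void main(String[] args) {
    SecondarySort.IntPair a = new SecondarySort.IntPair();
    SecondarySort.IntPair b = new SecondarySort.IntPair();
    a.set(7, 1);
    b.set(7, 42);
    SecondarySort.FirstPartitioner partitioner = new SecondarySort.FirstPartitioner();
    SecondarySort.FirstGroupingComparator grouper = new SecondarySort.FirstGroupingComparator();
    System.out.println("same partition: "
        + (partitioner.getPartition(a, null, 4) == partitioner.getPartition(b, null, 4))); // true
    System.out.println("grouped together: " + (grouper.compare(a, b) == 0)); // true
    System.out.println("sort order: " + a.compareTo(b)); // negative: (7,1) before (7,42)
  }
}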
| 7,826 | 31.6125 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples;
import java.io.BufferedOutputStream;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Charsets;
/**
* A map/reduce program that uses Bailey-Borwein-Plouffe to compute exact
* digits of Pi.
* This program is able to calculate digit positions
* lower than a certain limit, which is roughly 10^8.
* If the limit is exceeded,
* the corresponding results may be incorrect due to overflow errors.
* For computing higher bits of Pi, consider using distbbp.
*
* Reference:
*
* [1] David H. Bailey, Peter B. Borwein and Simon Plouffe. On the Rapid
* Computation of Various Polylogarithmic Constants.
* Math. Comp., 66:903-913, 1996.
*/
public class BaileyBorweinPlouffe extends Configured implements Tool {
public static final String DESCRIPTION
= "A map/reduce program that uses Bailey-Borwein-Plouffe to compute exact digits of Pi.";
private static final String NAME = "mapreduce." +
BaileyBorweinPlouffe.class.getSimpleName();
//custom job properties
private static final String WORKING_DIR_PROPERTY = NAME + ".dir";
private static final String HEX_FILE_PROPERTY = NAME + ".hex.file";
private static final String DIGIT_START_PROPERTY = NAME + ".digit.start";
private static final String DIGIT_SIZE_PROPERTY = NAME + ".digit.size";
private static final String DIGIT_PARTS_PROPERTY = NAME + ".digit.parts";
private static final Log LOG = LogFactory.getLog(BaileyBorweinPlouffe.class);
/** Mapper class computing digits of Pi. */
public static class BbpMapper extends
Mapper<LongWritable, IntWritable, LongWritable, BytesWritable> {
/** Compute the (offset+1)th to (offset+length)th digits. */
protected void map(LongWritable offset, IntWritable length,
final Context context) throws IOException, InterruptedException {
LOG.info("offset=" + offset + ", length=" + length);
// compute digits
final byte[] bytes = new byte[length.get() >> 1];
long d = offset.get();
for (int i = 0; i < bytes.length; d += 4) {
final long digits = hexDigits(d);
bytes[i++] = (byte) (digits >> 8);
bytes[i++] = (byte) digits;
}
// output map results
context.write(offset, new BytesWritable(bytes));
}
}
/** Reducer for concatenating map outputs. */
public static class BbpReducer extends
Reducer<LongWritable, BytesWritable, LongWritable, BytesWritable> {
/** Storing hex digits */
private final List<Byte> hex = new ArrayList<Byte>();
/** Concatenate map outputs. */
@Override
protected void reduce(LongWritable offset, Iterable<BytesWritable> values,
Context context) throws IOException, InterruptedException {
// read map outputs
for (BytesWritable bytes : values) {
for (int i = 0; i < bytes.getLength(); i++)
hex.add(bytes.getBytes()[i]);
}
LOG.info("hex.size() = " + hex.size());
}
/** Write output to files. */
@Override
protected void cleanup(Context context
) throws IOException, InterruptedException {
final Configuration conf = context.getConfiguration();
final Path dir = new Path(conf.get(WORKING_DIR_PROPERTY));
final FileSystem fs = dir.getFileSystem(conf);
// write hex output
{
final Path hexfile = new Path(conf.get(HEX_FILE_PROPERTY));
final OutputStream out = new BufferedOutputStream(fs.create(hexfile));
try {
for (byte b : hex)
out.write(b);
} finally {
out.close();
}
}
// If the starting digit is 1,
// the hex value can be converted to decimal value.
if (conf.getInt(DIGIT_START_PROPERTY, 1) == 1) {
final Path outfile = new Path(dir, "pi.txt");
LOG.info("Writing text output to " + outfile);
final OutputStream outputstream = fs.create(outfile);
try {
final PrintWriter out = new PrintWriter(
new OutputStreamWriter(outputstream, Charsets.UTF_8), true);
// write hex text
print(out, hex.iterator(), "Pi = 0x3.", "%02X", 5, 5);
out.println("Total number of hexadecimal digits is "
+ 2 * hex.size() + ".");
// write decimal text
final Fraction dec = new Fraction(hex);
final int decDigits = 2 * hex.size(); // TODO: this is conservative.
print(out, new Iterator<Integer>() {
private int i = 0;
public boolean hasNext() {
return i < decDigits;
}
public Integer next() {
i++;
return dec.times10();
}
public void remove() {
}
}, "Pi = 3.", "%d", 10, 5);
out.println("Total number of decimal digits is " + decDigits + ".");
} finally {
outputstream.close();
}
}
}
}
/** Print out elements in a nice format. */
private static <T> void print(PrintWriter out, Iterator<T> iterator,
String prefix, String format, int elementsPerGroup, int groupsPerLine) {
final StringBuilder sb = new StringBuilder("\n");
for (int i = 0; i < prefix.length(); i++)
sb.append(" ");
final String spaces = sb.toString();
out.print("\n" + prefix);
for (int i = 0; iterator.hasNext(); i++) {
if (i > 0 && i % elementsPerGroup == 0)
out.print((i / elementsPerGroup) % groupsPerLine == 0 ? spaces : " ");
out.print(String.format(format, iterator.next()));
}
out.println();
}
/** Input split for the {@link BbpInputFormat}. */
public static class BbpSplit extends InputSplit implements Writable {
private final static String[] EMPTY = {};
private long offset;
private int size;
/** Public default constructor for the Writable interface. */
public BbpSplit() {
}
private BbpSplit(int i, long offset, int size) {
LOG.info("Map #" + i + ": workload=" + workload(offset, size)
+ ", offset=" + offset + ", size=" + size);
this.offset = offset;
this.size = size;
}
private long getOffset() {
return offset;
}
/** {@inheritDoc} */
public long getLength() {
return size;
}
/** No location is needed. */
public String[] getLocations() {
return EMPTY;
}
/** {@inheritDoc} */
public void readFields(DataInput in) throws IOException {
offset = in.readLong();
size = in.readInt();
}
/** {@inheritDoc} */
public void write(DataOutput out) throws IOException {
out.writeLong(offset);
out.writeInt(size);
}
}
/**
* Input format for the {@link BbpMapper}.
* Keys and values represent offsets and sizes, respectively.
*/
public static class BbpInputFormat
extends InputFormat<LongWritable, IntWritable> {
/** {@inheritDoc} */
public List<InputSplit> getSplits(JobContext context) {
//get the property values
final int startDigit = context.getConfiguration().getInt(
DIGIT_START_PROPERTY, 1);
final int nDigits = context.getConfiguration().getInt(
DIGIT_SIZE_PROPERTY, 100);
final int nMaps = context.getConfiguration().getInt(
DIGIT_PARTS_PROPERTY, 1);
//create splits
final List<InputSplit> splits = new ArrayList<InputSplit>(nMaps);
final int[] parts = partition(startDigit - 1, nDigits, nMaps);
for (int i = 0; i < parts.length; ++i) {
final int k = i < parts.length - 1 ? parts[i+1]: nDigits+startDigit-1;
splits.add(new BbpSplit(i, parts[i], k - parts[i]));
}
return splits;
}
/** {@inheritDoc} */
public RecordReader<LongWritable, IntWritable> createRecordReader(
InputSplit generic, TaskAttemptContext context) {
final BbpSplit split = (BbpSplit)generic;
//return a record reader
return new RecordReader<LongWritable, IntWritable>() {
boolean done = false;
public void initialize(InputSplit split, TaskAttemptContext context) {
}
public boolean nextKeyValue() {
//Each record only contains one key.
return !done ? done = true : false;
}
public LongWritable getCurrentKey() {
return new LongWritable(split.getOffset());
}
public IntWritable getCurrentValue() {
return new IntWritable((int)split.getLength());
}
public float getProgress() {
return done? 1f: 0f;
}
public void close() {
}
};
}
}
/** Create and setup a job */
private static Job createJob(String name, Configuration conf
) throws IOException {
final Job job = Job.getInstance(conf, NAME + "_" + name);
final Configuration jobconf = job.getConfiguration();
job.setJarByClass(BaileyBorweinPlouffe.class);
// setup mapper
job.setMapperClass(BbpMapper.class);
job.setMapOutputKeyClass(LongWritable.class);
job.setMapOutputValueClass(BytesWritable.class);
// setup reducer
job.setReducerClass(BbpReducer.class);
job.setOutputKeyClass(LongWritable.class);
job.setOutputValueClass(BytesWritable.class);
job.setNumReduceTasks(1);
// setup input
job.setInputFormatClass(BbpInputFormat.class);
// disable task timeout
jobconf.setLong(MRJobConfig.TASK_TIMEOUT, 0);
// do not use speculative execution
jobconf.setBoolean(MRJobConfig.MAP_SPECULATIVE, false);
jobconf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, false);
return job;
}
/** Run a map/reduce job to compute Pi. */
private static void compute(int startDigit, int nDigits, int nMaps,
String workingDir, Configuration conf, PrintStream out
) throws IOException {
final String name = startDigit + "_" + nDigits;
    // setup working directory
out.println("Working Directory = " + workingDir);
out.println();
final FileSystem fs = FileSystem.get(conf);
final Path dir = fs.makeQualified(new Path(workingDir));
if (fs.exists(dir)) {
throw new IOException("Working directory " + dir
+ " already exists. Please remove it first.");
} else if (!fs.mkdirs(dir)) {
throw new IOException("Cannot create working directory " + dir);
}
out.println("Start Digit = " + startDigit);
out.println("Number of Digits = " + nDigits);
out.println("Number of Maps = " + nMaps);
// setup a job
final Job job = createJob(name, conf);
final Path hexfile = new Path(dir, "pi_" + name + ".hex");
FileOutputFormat.setOutputPath(job, new Path(dir, "out"));
// setup custom properties
job.getConfiguration().set(WORKING_DIR_PROPERTY, dir.toString());
job.getConfiguration().set(HEX_FILE_PROPERTY, hexfile.toString());
job.getConfiguration().setInt(DIGIT_START_PROPERTY, startDigit);
job.getConfiguration().setInt(DIGIT_SIZE_PROPERTY, nDigits);
job.getConfiguration().setInt(DIGIT_PARTS_PROPERTY, nMaps);
// start a map/reduce job
out.println("\nStarting Job ...");
final long startTime = System.currentTimeMillis();
try {
if (!job.waitForCompletion(true)) {
out.println("Job failed.");
System.exit(1);
}
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
final double duration = (System.currentTimeMillis() - startTime)/1000.0;
out.println("Duration is " + duration + " seconds.");
}
out.println("Output file: " + hexfile);
}
/**
* Parse arguments and then runs a map/reduce job.
* @return a non-zero value if there is an error. Otherwise, return 0.
*/
public int run(String[] args) throws IOException {
if (args.length != 4) {
System.err.println("Usage: bbp "
+ " <startDigit> <nDigits> <nMaps> <workingDir>");
ToolRunner.printGenericCommandUsage(System.err);
return -1;
}
final int startDigit = Integer.parseInt(args[0]);
final int nDigits = Integer.parseInt(args[1]);
final int nMaps = Integer.parseInt(args[2]);
final String workingDir = args[3];
if (startDigit <= 0) {
throw new IllegalArgumentException("startDigit = " + startDigit+" <= 0");
} else if (nDigits <= 0) {
throw new IllegalArgumentException("nDigits = " + nDigits + " <= 0");
} else if (nDigits % BBP_HEX_DIGITS != 0) {
throw new IllegalArgumentException("nDigits = " + nDigits
+ " is not a multiple of " + BBP_HEX_DIGITS);
} else if (nDigits - 1L + startDigit > IMPLEMENTATION_LIMIT + BBP_HEX_DIGITS) {
throw new UnsupportedOperationException("nDigits - 1 + startDigit = "
+ (nDigits - 1L + startDigit)
+ " > IMPLEMENTATION_LIMIT + BBP_HEX_DIGITS,"
+ ", where IMPLEMENTATION_LIMIT=" + IMPLEMENTATION_LIMIT
+ "and BBP_HEX_DIGITS=" + BBP_HEX_DIGITS);
} else if (nMaps <= 0) {
throw new IllegalArgumentException("nMaps = " + nMaps + " <= 0");
}
compute(startDigit, nDigits, nMaps, workingDir, getConf(), System.out);
return 0;
}
/** The main method for running it as a stand alone command. */
public static void main(String[] argv) throws Exception {
System.exit(ToolRunner.run(null, new BaileyBorweinPlouffe(), argv));
}
/////////////////////////////////////////////////////////////////////
// static fields and methods for Bailey-Borwein-Plouffe algorithm. //
/////////////////////////////////////////////////////////////////////
/** Limitation of the program.
* The program may return incorrect results if the limit is exceeded.
* The default value is 10^8.
* The program probably can handle some higher values such as 2^28.
*/
private static final long IMPLEMENTATION_LIMIT = 100000000;
private static final long ACCURACY_BIT = 32;
private static final long BBP_HEX_DIGITS = 4;
private static final long BBP_MULTIPLIER = 1 << (4 * BBP_HEX_DIGITS);
/**
* Compute the exact (d+1)th to (d+{@link #BBP_HEX_DIGITS})th
* hex digits of pi.
*/
static long hexDigits(final long d) {
if (d < 0) {
throw new IllegalArgumentException("d = " + d + " < 0");
} else if (d > IMPLEMENTATION_LIMIT) {
throw new IllegalArgumentException("d = " + d
+ " > IMPLEMENTATION_LIMIT = " + IMPLEMENTATION_LIMIT);
}
final double s1 = sum(1, d);
final double s4 = sum(4, d);
final double s5 = sum(5, d);
final double s6 = sum(6, d);
double pi = s1 + s1;
if (pi >= 1)
pi--;
pi *= 2;
if (pi >= 1)
pi--;
pi -= s4;
if (pi < 0)
pi++;
pi -= s4;
if (pi < 0)
pi++;
pi -= s5;
if (pi < 0)
pi++;
pi -= s6;
if (pi < 0)
pi++;
return (long) (pi * BBP_MULTIPLIER);
}
/**
* Approximate the fraction part of
   * $\sum_{k=0}^\infty \frac{16^{d-k}}{8k+j}$
* for d > 0 and j = 1, 4, 5, 6.
*/
private static double sum(final long j, final long d) {
long k = j == 1 ? 1 : 0;
double s = 0;
if (k <= d) {
s = 1.0 / ((d << 3) | j);
for (; k < d; k++) {
final long n = (k << 3) | j;
s += mod((d - k) << 2, n) * 1.0 / n;
if (s >= 1)
s--;
}
k++;
}
if (k >= 1L << (ACCURACY_BIT - 7))
return s;
for (;; k++) {
final long n = (k << 3) | j;
final long shift = (k - d) << 2;
if (ACCURACY_BIT <= shift || 1L << (ACCURACY_BIT - shift) < n) {
return s;
}
s += 1.0 / (n << shift);
if (s >= 1)
s--;
}
}
/** Compute $2^e \mod n$ for e > 0, n > 2 */
static long mod(final long e, final long n) {
long mask = (e & 0xFFFFFFFF00000000L) == 0 ? 0x00000000FFFFFFFFL
: 0xFFFFFFFF00000000L;
mask &= (e & 0xFFFF0000FFFF0000L & mask) == 0 ? 0x0000FFFF0000FFFFL
: 0xFFFF0000FFFF0000L;
mask &= (e & 0xFF00FF00FF00FF00L & mask) == 0 ? 0x00FF00FF00FF00FFL
: 0xFF00FF00FF00FF00L;
mask &= (e & 0xF0F0F0F0F0F0F0F0L & mask) == 0 ? 0x0F0F0F0F0F0F0F0FL
: 0xF0F0F0F0F0F0F0F0L;
mask &= (e & 0xCCCCCCCCCCCCCCCCL & mask) == 0 ? 0x3333333333333333L
: 0xCCCCCCCCCCCCCCCCL;
mask &= (e & 0xAAAAAAAAAAAAAAAAL & mask) == 0 ? 0x5555555555555555L
: 0xAAAAAAAAAAAAAAAAL;
long r = 2;
for (mask >>= 1; mask > 0; mask >>= 1) {
r *= r;
r %= n;
if ((e & mask) != 0) {
r += r;
if (r >= n)
r -= n;
}
}
return r;
}
/** Represent a number x in hex for 1 > x >= 0 */
private static class Fraction {
private final int[] integers; // only use 24-bit
private int first = 0; // index to the first non-zero integer
/** Construct a fraction represented by the bytes. */
Fraction(List<Byte> bytes) {
integers = new int[(bytes.size() + 2) / 3];
for (int i = 0; i < bytes.size(); i++) {
final int b = 0xFF & bytes.get(i);
integers[integers.length - 1 - i / 3] |= b << ((2 - i % 3) << 3);
}
skipZeros();
}
/**
* Compute y = 10*x and then set x to the fraction part of y, where x is the
* fraction represented by this object.
* @return integer part of y
*/
int times10() {
int carry = 0;
for (int i = first; i < integers.length; i++) {
integers[i] <<= 1;
integers[i] += carry + (integers[i] << 2);
carry = integers[i] >> 24;
integers[i] &= 0xFFFFFF;
}
skipZeros();
return carry;
}
private void skipZeros() {
for(; first < integers.length && integers[first] == 0; first++)
;
}
}
/**
* Partition input so that the workload of each part is
* approximately the same.
*/
static int[] partition(final int offset, final int size, final int nParts) {
final int[] parts = new int[nParts];
final long total = workload(offset, size);
final int remainder = offset % 4;
parts[0] = offset;
for (int i = 1; i < nParts; i++) {
final long target = offset + i*(total/nParts) + i*(total%nParts)/nParts;
//search the closest value
int low = parts[i - 1];
int high = offset + size;
for (; high > low + 4;) {
final int mid = (high + low - 2 * remainder) / 8 * 4 + remainder;
final long midvalue = workload(mid);
if (midvalue == target)
high = low = mid;
else if (midvalue > target)
high = mid;
else
low = mid;
}
parts[i] = high == low? high:
workload(high)-target > target-workload(low)?
low: high;
}
return parts;
}
private static final long MAX_N = 4294967295L; // prevent overflow
/** Estimate the workload for input size n (in some unit). */
private static long workload(final long n) {
if (n < 0) {
throw new IllegalArgumentException("n = " + n + " < 0");
} else if (n > MAX_N) {
throw new IllegalArgumentException("n = " + n + " > MAX_N = " + MAX_N);
}
return (n & 1L) == 0L ? (n >> 1) * (n + 1) : n * ((n + 1) >> 1);
}
private static long workload(long offset, long size) {
return workload(offset + size) - workload(offset);
}
}
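// Illustrative sketch, not part of the original example: hexDigits() and
// mod() can be exercised from this package without running a MapReduce job.
// Pi in hex is 3.243F6A88..., so the first call should print 243F and the
// second 6A88; the last line should print 24, since 2^10 mod 1000 = 24.
class BbpLocalSketch {
  public static void main(String[] args) {
    System.out.println(String.format("%04X", BaileyBorweinPlouffe.hexDigits(0)));
    System.out.println(String.format("%04X", BaileyBorweinPlouffe.hexDigits(4)));
    System.out.println(BaileyBorweinPlouffe.mod(10, 1000));
  }
}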
| 21,238 | 31.826893 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/ExampleDriver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples;
import org.apache.hadoop.examples.dancing.DistributedPentomino;
import org.apache.hadoop.examples.dancing.Sudoku;
import org.apache.hadoop.examples.pi.DistBbp;
import org.apache.hadoop.examples.terasort.TeraGen;
import org.apache.hadoop.examples.terasort.TeraSort;
import org.apache.hadoop.examples.terasort.TeraValidate;
import org.apache.hadoop.util.ProgramDriver;
/**
* A description of an example program based on its class and a
* human-readable description.
*/
public class ExampleDriver {
  public static void main(String[] argv) {
int exitCode = -1;
ProgramDriver pgd = new ProgramDriver();
try {
pgd.addClass("wordcount", WordCount.class,
"A map/reduce program that counts the words in the input files.");
pgd.addClass("wordmean", WordMean.class,
"A map/reduce program that counts the average length of the words in the input files.");
pgd.addClass("wordmedian", WordMedian.class,
"A map/reduce program that counts the median length of the words in the input files.");
pgd.addClass("wordstandarddeviation", WordStandardDeviation.class,
"A map/reduce program that counts the standard deviation of the length of the words in the input files.");
pgd.addClass("aggregatewordcount", AggregateWordCount.class,
"An Aggregate based map/reduce program that counts the words in the input files.");
pgd.addClass("aggregatewordhist", AggregateWordHistogram.class,
"An Aggregate based map/reduce program that computes the histogram of the words in the input files.");
pgd.addClass("grep", Grep.class,
"A map/reduce program that counts the matches of a regex in the input.");
pgd.addClass("randomwriter", RandomWriter.class,
"A map/reduce program that writes 10GB of random data per node.");
pgd.addClass("randomtextwriter", RandomTextWriter.class,
"A map/reduce program that writes 10GB of random textual data per node.");
pgd.addClass("sort", Sort.class, "A map/reduce program that sorts the data written by the random writer.");
pgd.addClass("pi", QuasiMonteCarlo.class, QuasiMonteCarlo.DESCRIPTION);
pgd.addClass("bbp", BaileyBorweinPlouffe.class, BaileyBorweinPlouffe.DESCRIPTION);
pgd.addClass("distbbp", DistBbp.class, DistBbp.DESCRIPTION);
pgd.addClass("pentomino", DistributedPentomino.class,
"A map/reduce tile laying program to find solutions to pentomino problems.");
pgd.addClass("secondarysort", SecondarySort.class,
"An example defining a secondary sort to the reduce.");
pgd.addClass("sudoku", Sudoku.class, "A sudoku solver.");
pgd.addClass("join", Join.class, "A job that effects a join over sorted, equally partitioned datasets");
pgd.addClass("multifilewc", MultiFileWordCount.class, "A job that counts words from several files.");
pgd.addClass("dbcount", DBCountPageView.class, "An example job that count the pageview counts from a database.");
pgd.addClass("teragen", TeraGen.class, "Generate data for the terasort");
pgd.addClass("terasort", TeraSort.class, "Run the terasort");
pgd.addClass("teravalidate", TeraValidate.class, "Checking results of terasort");
exitCode = pgd.run(argv);
}
catch(Throwable e){
e.printStackTrace();
}
System.exit(exitCode);
}
}
| 4,301 | 50.214286 | 125 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMean.java
|
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.examples;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Charsets;
public class WordMean extends Configured implements Tool {
private double mean = 0;
private final static Text COUNT = new Text("count");
private final static Text LENGTH = new Text("length");
private final static LongWritable ONE = new LongWritable(1);
/**
* Maps words from line of text into 2 key-value pairs; one key-value pair for
* counting the word, another for counting its length.
*/
public static class WordMeanMapper extends
Mapper<Object, Text, Text, LongWritable> {
private LongWritable wordLen = new LongWritable();
/**
* Emits 2 key-value pairs for counting the word and its length. Outputs are
* (Text, LongWritable).
*
* @param value
* This will be a line of text coming in from our input file.
*/
public void map(Object key, Text value, Context context)
throws IOException, InterruptedException {
StringTokenizer itr = new StringTokenizer(value.toString());
while (itr.hasMoreTokens()) {
String string = itr.nextToken();
this.wordLen.set(string.length());
context.write(LENGTH, this.wordLen);
context.write(COUNT, ONE);
}
}
}
/**
* Performs integer summation of all the values for each key.
*/
public static class WordMeanReducer extends
Reducer<Text, LongWritable, Text, LongWritable> {
private LongWritable sum = new LongWritable();
/**
* Sums all the individual values within the iterator and writes them to the
* same key.
*
* @param key
     *          This will be one of two constants: LENGTH or COUNT.
* @param values
* This will be an iterator of all the values associated with that
* key.
*/
public void reduce(Text key, Iterable<LongWritable> values, Context context)
throws IOException, InterruptedException {
      long theSum = 0;
for (LongWritable val : values) {
theSum += val.get();
}
sum.set(theSum);
context.write(key, sum);
}
}
/**
* Reads the output file and parses the summation of lengths, and the word
* count, to perform a quick calculation of the mean.
*
* @param path
* The path to find the output file in. Set in main to the output
* directory.
* @throws IOException
* If it cannot access the output directory, we throw an exception.
*/
private double readAndCalcMean(Path path, Configuration conf)
throws IOException {
FileSystem fs = FileSystem.get(conf);
Path file = new Path(path, "part-r-00000");
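    // Assumes the job ran with the default single reduce task, so both totals
    // end up in part-r-00000.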
if (!fs.exists(file))
throw new IOException("Output not found!");
BufferedReader br = null;
// average = total sum / number of elements;
try {
br = new BufferedReader(new InputStreamReader(fs.open(file), Charsets.UTF_8));
long count = 0;
long length = 0;
String line;
while ((line = br.readLine()) != null) {
StringTokenizer st = new StringTokenizer(line);
// grab type
String type = st.nextToken();
// differentiate
if (type.equals(COUNT.toString())) {
String countLit = st.nextToken();
count = Long.parseLong(countLit);
} else if (type.equals(LENGTH.toString())) {
String lengthLit = st.nextToken();
length = Long.parseLong(lengthLit);
}
}
double theMean = (((double) length) / ((double) count));
System.out.println("The mean is: " + theMean);
return theMean;
} finally {
if (br != null) {
br.close();
}
}
}
public static void main(String[] args) throws Exception {
ToolRunner.run(new Configuration(), new WordMean(), args);
}
@Override
public int run(String[] args) throws Exception {
if (args.length != 2) {
System.err.println("Usage: wordmean <in> <out>");
      return 2;
}
Configuration conf = getConf();
Job job = Job.getInstance(conf, "word mean");
job.setJarByClass(WordMean.class);
job.setMapperClass(WordMeanMapper.class);
job.setCombinerClass(WordMeanReducer.class);
job.setReducerClass(WordMeanReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(LongWritable.class);
FileInputFormat.addInputPath(job, new Path(args[0]));
Path outputpath = new Path(args[1]);
FileOutputFormat.setOutputPath(job, outputpath);
boolean result = job.waitForCompletion(true);
mean = readAndCalcMean(outputpath, conf);
return (result ? 0 : 1);
}
/**
* Only valuable after run() called.
*
* @return Returns the mean value.
*/
public double getMean() {
return mean;
}
}
| 6,298 | 30.653266 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Grep.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.map.InverseMapper;
import org.apache.hadoop.mapreduce.lib.map.RegexMapper;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/* Extracts strings matching a regex from the input files and counts them. */
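// Example invocation (the examples jar name varies by release):
//   bin/hadoop jar hadoop-mapreduce-examples-*.jar grep <inDir> <outDir> 'dfs[a-z.]+'
// The optional fourth argument selects a capturing group of the regex to count.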
public class Grep extends Configured implements Tool {
private Grep() {} // singleton
public int run(String[] args) throws Exception {
if (args.length < 3) {
System.out.println("Grep <inDir> <outDir> <regex> [<group>]");
ToolRunner.printGenericCommandUsage(System.out);
return 2;
}
Path tempDir =
new Path("grep-temp-"+
Integer.toString(new Random().nextInt(Integer.MAX_VALUE)));
Configuration conf = getConf();
conf.set(RegexMapper.PATTERN, args[2]);
if (args.length == 4)
conf.set(RegexMapper.GROUP, args[3]);
Job grepJob = Job.getInstance(conf);
try {
grepJob.setJobName("grep-search");
grepJob.setJarByClass(Grep.class);
FileInputFormat.setInputPaths(grepJob, args[0]);
grepJob.setMapperClass(RegexMapper.class);
grepJob.setCombinerClass(LongSumReducer.class);
grepJob.setReducerClass(LongSumReducer.class);
FileOutputFormat.setOutputPath(grepJob, tempDir);
grepJob.setOutputFormatClass(SequenceFileOutputFormat.class);
grepJob.setOutputKeyClass(Text.class);
grepJob.setOutputValueClass(LongWritable.class);
grepJob.waitForCompletion(true);
Job sortJob = Job.getInstance(conf);
sortJob.setJobName("grep-sort");
sortJob.setJarByClass(Grep.class);
FileInputFormat.setInputPaths(sortJob, tempDir);
sortJob.setInputFormatClass(SequenceFileInputFormat.class);
sortJob.setMapperClass(InverseMapper.class);
sortJob.setNumReduceTasks(1); // write a single file
FileOutputFormat.setOutputPath(sortJob, new Path(args[1]));
sortJob.setSortComparatorClass( // sort by decreasing freq
LongWritable.DecreasingComparator.class);
sortJob.waitForCompletion(true);
}
finally {
FileSystem.get(conf).delete(tempDir, true);
}
return 0;
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new Grep(), args);
System.exit(res);
}
}
| 3,828 | 34.453704 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordStandardDeviation.java
|
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.examples;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Charsets;
public class WordStandardDeviation extends Configured implements Tool {
private double stddev = 0;
private final static Text LENGTH = new Text("length");
private final static Text SQUARE = new Text("square");
private final static Text COUNT = new Text("count");
private final static LongWritable ONE = new LongWritable(1);
/**
* Maps words from line of text into 3 key-value pairs; one key-value pair for
* counting the word, one for counting its length, and one for counting the
* square of its length.
*/
public static class WordStandardDeviationMapper extends
Mapper<Object, Text, Text, LongWritable> {
private LongWritable wordLen = new LongWritable();
private LongWritable wordLenSq = new LongWritable();
/**
* Emits 3 key-value pairs for counting the word, its length, and the
   * square of its length. Outputs are (Text, LongWritable).
*
* @param value
* This will be a line of text coming in from our input file.
*/
public void map(Object key, Text value, Context context)
throws IOException, InterruptedException {
StringTokenizer itr = new StringTokenizer(value.toString());
while (itr.hasMoreTokens()) {
String string = itr.nextToken();
this.wordLen.set(string.length());
// the square of an integer is an integer...
this.wordLenSq.set((long) Math.pow(string.length(), 2.0));
context.write(LENGTH, this.wordLen);
context.write(SQUARE, this.wordLenSq);
context.write(COUNT, ONE);
}
}
}
/**
* Performs integer summation of all the values for each key.
*/
public static class WordStandardDeviationReducer extends
Reducer<Text, LongWritable, Text, LongWritable> {
private LongWritable val = new LongWritable();
/**
* Sums all the individual values within the iterator and writes them to the
* same key.
*
* @param key
     *          This will be one of three constants: LENGTH, COUNT, or SQUARE.
* @param values
* This will be an iterator of all the values associated with that
* key.
*/
public void reduce(Text key, Iterable<LongWritable> values, Context context)
throws IOException, InterruptedException {
      long sum = 0;
for (LongWritable value : values) {
sum += value.get();
}
val.set(sum);
context.write(key, val);
}
}
/**
* Reads the output file and parses the summation of lengths, the word count,
* and the lengths squared, to perform a quick calculation of the standard
* deviation.
*
* @param path
* The path to find the output file in. Set in main to the output
* directory.
* @throws IOException
* If it cannot access the output directory, we throw an exception.
*/
private double readAndCalcStdDev(Path path, Configuration conf)
throws IOException {
FileSystem fs = FileSystem.get(conf);
Path file = new Path(path, "part-r-00000");
if (!fs.exists(file))
throw new IOException("Output not found!");
double stddev = 0;
BufferedReader br = null;
try {
br = new BufferedReader(new InputStreamReader(fs.open(file), Charsets.UTF_8));
long count = 0;
long length = 0;
long square = 0;
String line;
while ((line = br.readLine()) != null) {
StringTokenizer st = new StringTokenizer(line);
// grab type
String type = st.nextToken();
// differentiate
if (type.equals(COUNT.toString())) {
String countLit = st.nextToken();
count = Long.parseLong(countLit);
} else if (type.equals(LENGTH.toString())) {
String lengthLit = st.nextToken();
length = Long.parseLong(lengthLit);
} else if (type.equals(SQUARE.toString())) {
String squareLit = st.nextToken();
square = Long.parseLong(squareLit);
}
}
// average = total sum / number of elements;
double mean = (((double) length) / ((double) count));
// standard deviation = sqrt((sum(lengths ^ 2)/count) - (mean ^ 2))
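      // i.e. the population standard deviation, sqrt(E[len^2] - E[len]^2).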
mean = Math.pow(mean, 2.0);
double term = (((double) square / ((double) count)));
stddev = Math.sqrt((term - mean));
System.out.println("The standard deviation is: " + stddev);
} finally {
if (br != null) {
br.close();
}
}
return stddev;
}
public static void main(String[] args) throws Exception {
ToolRunner.run(new Configuration(), new WordStandardDeviation(),
args);
}
@Override
public int run(String[] args) throws Exception {
if (args.length != 2) {
System.err.println("Usage: wordstddev <in> <out>");
      return 2;
}
Configuration conf = getConf();
Job job = Job.getInstance(conf, "word stddev");
job.setJarByClass(WordStandardDeviation.class);
job.setMapperClass(WordStandardDeviationMapper.class);
job.setCombinerClass(WordStandardDeviationReducer.class);
job.setReducerClass(WordStandardDeviationReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(LongWritable.class);
FileInputFormat.addInputPath(job, new Path(args[0]));
Path outputpath = new Path(args[1]);
FileOutputFormat.setOutputPath(job, outputpath);
boolean result = job.waitForCompletion(true);
// read output and calculate standard deviation
stddev = readAndCalcStdDev(outputpath, conf);
return (result ? 0 : 1);
}
public double getStandardDeviation() {
return stddev;
}
}
| 7,224 | 32.920188 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/MultiFileWordCount.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
 * MultiFileWordCount is an example that demonstrates the usage of
 * CombineFileInputFormat. This example counts the occurrences of
 * words in the text files under the given input directory.
*/
public class MultiFileWordCount extends Configured implements Tool {
/**
* This record keeps <filename,offset> pairs.
*/
public static class WordOffset implements WritableComparable {
private long offset;
private String fileName;
public void readFields(DataInput in) throws IOException {
this.offset = in.readLong();
this.fileName = Text.readString(in);
}
public void write(DataOutput out) throws IOException {
out.writeLong(offset);
Text.writeString(out, fileName);
}
public int compareTo(Object o) {
WordOffset that = (WordOffset)o;
int f = this.fileName.compareTo(that.fileName);
if(f == 0) {
return (int)Math.signum((double)(this.offset - that.offset));
}
return f;
}
@Override
public boolean equals(Object obj) {
if(obj instanceof WordOffset)
return this.compareTo(obj) == 0;
return false;
}
@Override
public int hashCode() {
assert false : "hashCode not designed";
return 42; //an arbitrary constant
}
}
/**
* To use {@link CombineFileInputFormat}, one should extend it, to return a
* (custom) {@link RecordReader}. CombineFileInputFormat uses
* {@link CombineFileSplit}s.
*/
public static class MyInputFormat
extends CombineFileInputFormat<WordOffset, Text> {
public RecordReader<WordOffset,Text> createRecordReader(InputSplit split,
TaskAttemptContext context) throws IOException {
return new CombineFileRecordReader<WordOffset, Text>(
(CombineFileSplit)split, context, CombineFileLineRecordReader.class);
}
}
/**
   * RecordReader is responsible for extracting records from a chunk
* of the CombineFileSplit.
*/
public static class CombineFileLineRecordReader
extends RecordReader<WordOffset, Text> {
private long startOffset; //offset of the chunk;
private long end; //end of the chunk;
private long pos; // current pos
private FileSystem fs;
private Path path;
private WordOffset key;
private Text value;
private FSDataInputStream fileIn;
private LineReader reader;
public CombineFileLineRecordReader(CombineFileSplit split,
TaskAttemptContext context, Integer index) throws IOException {
this.path = split.getPath(index);
fs = this.path.getFileSystem(context.getConfiguration());
this.startOffset = split.getOffset(index);
this.end = startOffset + split.getLength(index);
boolean skipFirstLine = false;
//open the file
fileIn = fs.open(path);
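      // If this chunk does not start at the beginning of the file, back up one
      // byte and discard the (possibly partial) first line; the reader of the
      // preceding chunk is responsible for that line.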
if (startOffset != 0) {
skipFirstLine = true;
--startOffset;
fileIn.seek(startOffset);
}
reader = new LineReader(fileIn);
if (skipFirstLine) { // skip first line and re-establish "startOffset".
startOffset += reader.readLine(new Text(), 0,
(int)Math.min((long)Integer.MAX_VALUE, end - startOffset));
}
this.pos = startOffset;
}
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
}
public void close() throws IOException { }
public float getProgress() throws IOException {
if (startOffset == end) {
return 0.0f;
} else {
return Math.min(1.0f, (pos - startOffset) / (float)(end - startOffset));
}
}
public boolean nextKeyValue() throws IOException {
if (key == null) {
key = new WordOffset();
key.fileName = path.getName();
}
key.offset = pos;
if (value == null) {
value = new Text();
}
int newSize = 0;
if (pos < end) {
newSize = reader.readLine(value);
pos += newSize;
}
if (newSize == 0) {
key = null;
value = null;
return false;
} else {
return true;
}
}
public WordOffset getCurrentKey()
throws IOException, InterruptedException {
return key;
}
public Text getCurrentValue() throws IOException, InterruptedException {
return value;
}
}
/**
* This Mapper is similar to the one in {@link WordCount.TokenizerMapper}.
*/
public static class MapClass extends
Mapper<WordOffset, Text, Text, IntWritable> {
private final static IntWritable one = new IntWritable(1);
private Text word = new Text();
public void map(WordOffset key, Text value, Context context)
throws IOException, InterruptedException {
String line = value.toString();
StringTokenizer itr = new StringTokenizer(line);
while (itr.hasMoreTokens()) {
word.set(itr.nextToken());
context.write(word, one);
}
}
}
private void printUsage() {
System.out.println("Usage : multifilewc <input_dir> <output>" );
}
public int run(String[] args) throws Exception {
if(args.length < 2) {
printUsage();
return 2;
}
Job job = Job.getInstance(getConf());
job.setJobName("MultiFileWordCount");
job.setJarByClass(MultiFileWordCount.class);
//set the InputFormat of the job to our InputFormat
job.setInputFormatClass(MyInputFormat.class);
// the keys are words (strings)
job.setOutputKeyClass(Text.class);
// the values are counts (ints)
job.setOutputValueClass(IntWritable.class);
//use the defined mapper
job.setMapperClass(MapClass.class);
//use the WordCount Reducer
job.setCombinerClass(IntSumReducer.class);
job.setReducerClass(IntSumReducer.class);
FileInputFormat.addInputPaths(job, args[0]);
FileOutputFormat.setOutputPath(job, new Path(args[1]));
return job.waitForCompletion(true) ? 0 : 1;
}
public static void main(String[] args) throws Exception {
int ret = ToolRunner.run(new MultiFileWordCount(), args);
System.exit(ret);
}
}
| 8,126 | 30.019084 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomTextWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* This program uses map/reduce to just run a distributed job where there is
* no interaction between the tasks and each task writes a large unsorted
* random sequence of words.
 * In order for this program to generate data for terasort with 5-10 words
* per key and 20-100 words per value, have the following config:
* <pre>{@code
* <?xml version="1.0"?>
* <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
* <configuration>
* <property>
* <name>mapreduce.randomtextwriter.minwordskey</name>
* <value>5</value>
* </property>
* <property>
* <name>mapreduce.randomtextwriter.maxwordskey</name>
* <value>10</value>
* </property>
* <property>
* <name>mapreduce.randomtextwriter.minwordsvalue</name>
* <value>20</value>
* </property>
* <property>
* <name>mapreduce.randomtextwriter.maxwordsvalue</name>
* <value>100</value>
* </property>
* <property>
* <name>mapreduce.randomtextwriter.totalbytes</name>
* <value>1099511627776</value>
* </property>
* </configuration>}</pre>
*
* Equivalently, {@link RandomTextWriter} also supports all the above options
* and ones supported by {@link Tool} via the command-line.
*
* To run: bin/hadoop jar hadoop-${version}-examples.jar randomtextwriter
* [-outFormat <i>output format class</i>] <i>output</i>
*/
public class RandomTextWriter extends Configured implements Tool {
public static final String TOTAL_BYTES =
"mapreduce.randomtextwriter.totalbytes";
public static final String BYTES_PER_MAP =
"mapreduce.randomtextwriter.bytespermap";
public static final String MAPS_PER_HOST =
"mapreduce.randomtextwriter.mapsperhost";
public static final String MAX_VALUE = "mapreduce.randomtextwriter.maxwordsvalue";
public static final String MIN_VALUE = "mapreduce.randomtextwriter.minwordsvalue";
public static final String MIN_KEY = "mapreduce.randomtextwriter.minwordskey";
public static final String MAX_KEY = "mapreduce.randomtextwriter.maxwordskey";
static int printUsage() {
System.out.println("randomtextwriter " +
"[-outFormat <output format class>] " +
"<output>");
ToolRunner.printGenericCommandUsage(System.out);
return 2;
}
/**
* User counters
*/
static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
static class RandomTextMapper extends Mapper<Text, Text, Text, Text> {
private long numBytesToWrite;
private int minWordsInKey;
private int wordsInKeyRange;
private int minWordsInValue;
private int wordsInValueRange;
private Random random = new Random();
/**
* Save the configuration value that we need to write the data.
*/
public void setup(Context context) {
Configuration conf = context.getConfiguration();
numBytesToWrite = conf.getLong(BYTES_PER_MAP,
1*1024*1024*1024);
minWordsInKey = conf.getInt(MIN_KEY, 5);
wordsInKeyRange = (conf.getInt(MAX_KEY, 10) - minWordsInKey);
minWordsInValue = conf.getInt(MIN_VALUE, 10);
wordsInValueRange = (conf.getInt(MAX_VALUE, 100) - minWordsInValue);
}
/**
     * Write random key/value records until the configured number of bytes
     * has been written.
*/
public void map(Text key, Text value,
Context context) throws IOException,InterruptedException {
int itemCount = 0;
while (numBytesToWrite > 0) {
// Generate the key/value
int noWordsKey = minWordsInKey +
(wordsInKeyRange != 0 ? random.nextInt(wordsInKeyRange) : 0);
int noWordsValue = minWordsInValue +
(wordsInValueRange != 0 ? random.nextInt(wordsInValueRange) : 0);
Text keyWords = generateSentence(noWordsKey);
Text valueWords = generateSentence(noWordsValue);
// Write the sentence
context.write(keyWords, valueWords);
numBytesToWrite -= (keyWords.getLength() + valueWords.getLength());
// Update counters, progress etc.
context.getCounter(Counters.BYTES_WRITTEN).increment(
keyWords.getLength() + valueWords.getLength());
context.getCounter(Counters.RECORDS_WRITTEN).increment(1);
if (++itemCount % 200 == 0) {
context.setStatus("wrote record " + itemCount + ". " +
numBytesToWrite + " bytes left.");
}
}
context.setStatus("done with " + itemCount + " records.");
}
private Text generateSentence(int noWords) {
StringBuffer sentence = new StringBuffer();
String space = " ";
for (int i=0; i < noWords; ++i) {
sentence.append(words[random.nextInt(words.length)]);
sentence.append(space);
}
return new Text(sentence.toString());
}
}
/**
* This is the main routine for launching a distributed random write job.
* It runs 10 maps/node and each node writes 1 gig of data to a DFS file.
 * There are no reduce tasks.
*
* @throws IOException
*/
public int run(String[] args) throws Exception {
if (args.length == 0) {
return printUsage();
}
Configuration conf = getConf();
JobClient client = new JobClient(conf);
ClusterStatus cluster = client.getClusterStatus();
int numMapsPerHost = conf.getInt(MAPS_PER_HOST, 10);
long numBytesToWritePerMap = conf.getLong(BYTES_PER_MAP,
1*1024*1024*1024);
if (numBytesToWritePerMap == 0) {
System.err.println("Cannot have " + BYTES_PER_MAP +" set to 0");
return -2;
}
long totalBytesToWrite = conf.getLong(TOTAL_BYTES,
numMapsPerHost*numBytesToWritePerMap*cluster.getTaskTrackers());
int numMaps = (int) (totalBytesToWrite / numBytesToWritePerMap);
if (numMaps == 0 && totalBytesToWrite > 0) {
numMaps = 1;
conf.setLong(BYTES_PER_MAP, totalBytesToWrite);
}
conf.setInt(MRJobConfig.NUM_MAPS, numMaps);
Job job = Job.getInstance(conf);
job.setJarByClass(RandomTextWriter.class);
job.setJobName("random-text-writer");
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setInputFormatClass(RandomWriter.RandomInputFormat.class);
job.setMapperClass(RandomTextMapper.class);
Class<? extends OutputFormat> outputFormatClass =
SequenceFileOutputFormat.class;
List<String> otherArgs = new ArrayList<String>();
for(int i=0; i < args.length; ++i) {
try {
if ("-outFormat".equals(args[i])) {
outputFormatClass =
Class.forName(args[++i]).asSubclass(OutputFormat.class);
} else {
otherArgs.add(args[i]);
}
} catch (ArrayIndexOutOfBoundsException except) {
System.out.println("ERROR: Required parameter missing from " +
args[i-1]);
return printUsage(); // exits
}
}
job.setOutputFormatClass(outputFormatClass);
FileOutputFormat.setOutputPath(job, new Path(otherArgs.get(0)));
System.out.println("Running " + numMaps + " maps.");
// reducer NONE
job.setNumReduceTasks(0);
Date startTime = new Date();
System.out.println("Job started: " + startTime);
int ret = job.waitForCompletion(true) ? 0 : 1;
Date endTime = new Date();
System.out.println("Job ended: " + endTime);
System.out.println("The job took " +
(endTime.getTime() - startTime.getTime()) /1000 +
" seconds.");
return ret;
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new RandomTextWriter(), args);
System.exit(res);
}
/**
* A random list of 1000 words from /usr/share/dict/words
*/
private static String[] words = {
"diurnalness", "Homoiousian",
"spiranthic", "tetragynian",
"silverhead", "ungreat",
"lithograph", "exploiter",
"physiologian", "by",
"hellbender", "Filipendula",
"undeterring", "antiscolic",
"pentagamist", "hypoid",
"cacuminal", "sertularian",
"schoolmasterism", "nonuple",
"gallybeggar", "phytonic",
"swearingly", "nebular",
"Confervales", "thermochemically",
"characinoid", "cocksuredom",
"fallacious", "feasibleness",
"debromination", "playfellowship",
"tramplike", "testa",
"participatingly", "unaccessible",
"bromate", "experientialist",
"roughcast", "docimastical",
"choralcelo", "blightbird",
"peptonate", "sombreroed",
"unschematized", "antiabolitionist",
"besagne", "mastication",
"bromic", "sviatonosite",
"cattimandoo", "metaphrastical",
"endotheliomyoma", "hysterolysis",
"unfulminated", "Hester",
"oblongly", "blurredness",
"authorling", "chasmy",
"Scorpaenidae", "toxihaemia",
"Dictograph", "Quakerishly",
"deaf", "timbermonger",
"strammel", "Thraupidae",
"seditious", "plerome",
"Arneb", "eristically",
"serpentinic", "glaumrie",
"socioromantic", "apocalypst",
"tartrous", "Bassaris",
"angiolymphoma", "horsefly",
"kenno", "astronomize",
"euphemious", "arsenide",
"untongued", "parabolicness",
"uvanite", "helpless",
"gemmeous", "stormy",
"templar", "erythrodextrin",
"comism", "interfraternal",
"preparative", "parastas",
"frontoorbital", "Ophiosaurus",
"diopside", "serosanguineous",
"ununiformly", "karyological",
"collegian", "allotropic",
"depravity", "amylogenesis",
"reformatory", "epidymides",
"pleurotropous", "trillium",
"dastardliness", "coadvice",
"embryotic", "benthonic",
"pomiferous", "figureheadship",
"Megaluridae", "Harpa",
"frenal", "commotion",
"abthainry", "cobeliever",
"manilla", "spiciferous",
"nativeness", "obispo",
"monilioid", "biopsic",
"valvula", "enterostomy",
"planosubulate", "pterostigma",
"lifter", "triradiated",
"venialness", "tum",
"archistome", "tautness",
"unswanlike", "antivenin",
"Lentibulariaceae", "Triphora",
"angiopathy", "anta",
"Dawsonia", "becomma",
"Yannigan", "winterproof",
"antalgol", "harr",
"underogating", "ineunt",
"cornberry", "flippantness",
"scyphostoma", "approbation",
"Ghent", "Macraucheniidae",
"scabbiness", "unanatomized",
"photoelasticity", "eurythermal",
"enation", "prepavement",
"flushgate", "subsequentially",
"Edo", "antihero",
"Isokontae", "unforkedness",
"porriginous", "daytime",
"nonexecutive", "trisilicic",
"morphiomania", "paranephros",
"botchedly", "impugnation",
"Dodecatheon", "obolus",
"unburnt", "provedore",
"Aktistetae", "superindifference",
"Alethea", "Joachimite",
"cyanophilous", "chorograph",
"brooky", "figured",
"periclitation", "quintette",
"hondo", "ornithodelphous",
"unefficient", "pondside",
"bogydom", "laurinoxylon",
"Shiah", "unharmed",
"cartful", "noncrystallized",
"abusiveness", "cromlech",
"japanned", "rizzomed",
"underskin", "adscendent",
"allectory", "gelatinousness",
"volcano", "uncompromisingly",
"cubit", "idiotize",
"unfurbelowed", "undinted",
"magnetooptics", "Savitar",
"diwata", "ramosopalmate",
"Pishquow", "tomorn",
"apopenptic", "Haversian",
"Hysterocarpus", "ten",
"outhue", "Bertat",
"mechanist", "asparaginic",
"velaric", "tonsure",
"bubble", "Pyrales",
"regardful", "glyphography",
"calabazilla", "shellworker",
"stradametrical", "havoc",
"theologicopolitical", "sawdust",
"diatomaceous", "jajman",
"temporomastoid", "Serrifera",
"Ochnaceae", "aspersor",
"trailmaking", "Bishareen",
"digitule", "octogynous",
"epididymitis", "smokefarthings",
"bacillite", "overcrown",
"mangonism", "sirrah",
"undecorated", "psychofugal",
"bismuthiferous", "rechar",
"Lemuridae", "frameable",
"thiodiazole", "Scanic",
"sportswomanship", "interruptedness",
"admissory", "osteopaedion",
"tingly", "tomorrowness",
"ethnocracy", "trabecular",
"vitally", "fossilism",
"adz", "metopon",
"prefatorial", "expiscate",
"diathermacy", "chronist",
"nigh", "generalizable",
"hysterogen", "aurothiosulphuric",
"whitlowwort", "downthrust",
"Protestantize", "monander",
"Itea", "chronographic",
"silicize", "Dunlop",
"eer", "componental",
"spot", "pamphlet",
"antineuritic", "paradisean",
"interruptor", "debellator",
"overcultured", "Florissant",
"hyocholic", "pneumatotherapy",
"tailoress", "rave",
"unpeople", "Sebastian",
"thermanesthesia", "Coniferae",
"swacking", "posterishness",
"ethmopalatal", "whittle",
"analgize", "scabbardless",
"naught", "symbiogenetically",
"trip", "parodist",
"columniform", "trunnel",
"yawler", "goodwill",
"pseudohalogen", "swangy",
"cervisial", "mediateness",
"genii", "imprescribable",
"pony", "consumptional",
"carposporangial", "poleax",
"bestill", "subfebrile",
"sapphiric", "arrowworm",
"qualminess", "ultraobscure",
"thorite", "Fouquieria",
"Bermudian", "prescriber",
"elemicin", "warlike",
"semiangle", "rotular",
"misthread", "returnability",
"seraphism", "precostal",
"quarried", "Babylonism",
"sangaree", "seelful",
"placatory", "pachydermous",
"bozal", "galbulus",
"spermaphyte", "cumbrousness",
"pope", "signifier",
"Endomycetaceae", "shallowish",
"sequacity", "periarthritis",
"bathysphere", "pentosuria",
"Dadaism", "spookdom",
"Consolamentum", "afterpressure",
"mutter", "louse",
"ovoviviparous", "corbel",
"metastoma", "biventer",
"Hydrangea", "hogmace",
"seizing", "nonsuppressed",
"oratorize", "uncarefully",
"benzothiofuran", "penult",
"balanocele", "macropterous",
"dishpan", "marten",
"absvolt", "jirble",
"parmelioid", "airfreighter",
"acocotl", "archesporial",
"hypoplastral", "preoral",
"quailberry", "cinque",
"terrestrially", "stroking",
"limpet", "moodishness",
"canicule", "archididascalian",
"pompiloid", "overstaid",
"introducer", "Italical",
"Christianopaganism", "prescriptible",
"subofficer", "danseuse",
"cloy", "saguran",
"frictionlessly", "deindividualization",
"Bulanda", "ventricous",
"subfoliar", "basto",
"scapuloradial", "suspend",
"stiffish", "Sphenodontidae",
"eternal", "verbid",
"mammonish", "upcushion",
"barkometer", "concretion",
"preagitate", "incomprehensible",
"tristich", "visceral",
"hemimelus", "patroller",
"stentorophonic", "pinulus",
"kerykeion", "brutism",
"monstership", "merciful",
"overinstruct", "defensibly",
"bettermost", "splenauxe",
"Mormyrus", "unreprimanded",
"taver", "ell",
"proacquittal", "infestation",
"overwoven", "Lincolnlike",
"chacona", "Tamil",
"classificational", "lebensraum",
"reeveland", "intuition",
"Whilkut", "focaloid",
"Eleusinian", "micromembrane",
"byroad", "nonrepetition",
"bacterioblast", "brag",
"ribaldrous", "phytoma",
"counteralliance", "pelvimetry",
"pelf", "relaster",
"thermoresistant", "aneurism",
"molossic", "euphonym",
"upswell", "ladhood",
"phallaceous", "inertly",
"gunshop", "stereotypography",
"laryngic", "refasten",
"twinling", "oflete",
"hepatorrhaphy", "electrotechnics",
"cockal", "guitarist",
"topsail", "Cimmerianism",
"larklike", "Llandovery",
"pyrocatechol", "immatchable",
"chooser", "metrocratic",
"craglike", "quadrennial",
"nonpoisonous", "undercolored",
"knob", "ultratense",
"balladmonger", "slait",
"sialadenitis", "bucketer",
"magnificently", "unstipulated",
"unscourged", "unsupercilious",
"packsack", "pansophism",
"soorkee", "percent",
"subirrigate", "champer",
"metapolitics", "spherulitic",
"involatile", "metaphonical",
"stachyuraceous", "speckedness",
"bespin", "proboscidiform",
"gul", "squit",
"yeelaman", "peristeropode",
"opacousness", "shibuichi",
"retinize", "yote",
"misexposition", "devilwise",
"pumpkinification", "vinny",
"bonze", "glossing",
"decardinalize", "transcortical",
"serphoid", "deepmost",
"guanajuatite", "wemless",
"arval", "lammy",
"Effie", "Saponaria",
"tetrahedral", "prolificy",
"excerpt", "dunkadoo",
"Spencerism", "insatiately",
"Gilaki", "oratorship",
"arduousness", "unbashfulness",
"Pithecolobium", "unisexuality",
"veterinarian", "detractive",
"liquidity", "acidophile",
"proauction", "sural",
"totaquina", "Vichyite",
"uninhabitedness", "allegedly",
"Gothish", "manny",
"Inger", "flutist",
"ticktick", "Ludgatian",
"homotransplant", "orthopedical",
"diminutively", "monogoneutic",
"Kenipsim", "sarcologist",
"drome", "stronghearted",
"Fameuse", "Swaziland",
"alen", "chilblain",
"beatable", "agglomeratic",
"constitutor", "tendomucoid",
"porencephalous", "arteriasis",
"boser", "tantivy",
"rede", "lineamental",
"uncontradictableness", "homeotypical",
"masa", "folious",
"dosseret", "neurodegenerative",
"subtransverse", "Chiasmodontidae",
"palaeotheriodont", "unstressedly",
"chalcites", "piquantness",
"lampyrine", "Aplacentalia",
"projecting", "elastivity",
"isopelletierin", "bladderwort",
"strander", "almud",
"iniquitously", "theologal",
"bugre", "chargeably",
"imperceptivity", "meriquinoidal",
"mesophyte", "divinator",
"perfunctory", "counterappellant",
"synovial", "charioteer",
"crystallographical", "comprovincial",
"infrastapedial", "pleasurehood",
"inventurous", "ultrasystematic",
"subangulated", "supraoesophageal",
"Vaishnavism", "transude",
"chrysochrous", "ungrave",
"reconciliable", "uninterpleaded",
"erlking", "wherefrom",
"aprosopia", "antiadiaphorist",
"metoxazine", "incalculable",
"umbellic", "predebit",
"foursquare", "unimmortal",
"nonmanufacture", "slangy",
"predisputant", "familist",
"preaffiliate", "friarhood",
"corelysis", "zoonitic",
"halloo", "paunchy",
"neuromimesis", "aconitine",
"hackneyed", "unfeeble",
"cubby", "autoschediastical",
"naprapath", "lyrebird",
"inexistency", "leucophoenicite",
"ferrogoslarite", "reperuse",
"uncombable", "tambo",
"propodiale", "diplomatize",
"Russifier", "clanned",
"corona", "michigan",
"nonutilitarian", "transcorporeal",
"bought", "Cercosporella",
"stapedius", "glandularly",
"pictorially", "weism",
"disilane", "rainproof",
"Caphtor", "scrubbed",
"oinomancy", "pseudoxanthine",
"nonlustrous", "redesertion",
"Oryzorictinae", "gala",
"Mycogone", "reappreciate",
"cyanoguanidine", "seeingness",
"breadwinner", "noreast",
"furacious", "epauliere",
"omniscribent", "Passiflorales",
"uninductive", "inductivity",
"Orbitolina", "Semecarpus",
"migrainoid", "steprelationship",
"phlogisticate", "mesymnion",
"sloped", "edificator",
"beneficent", "culm",
"paleornithology", "unurban",
"throbless", "amplexifoliate",
"sesquiquintile", "sapience",
"astucious", "dithery",
"boor", "ambitus",
"scotching", "uloid",
"uncompromisingness", "hoove",
"waird", "marshiness",
"Jerusalem", "mericarp",
"unevoked", "benzoperoxide",
"outguess", "pyxie",
"hymnic", "euphemize",
"mendacity", "erythremia",
"rosaniline", "unchatteled",
"lienteria", "Bushongo",
"dialoguer", "unrepealably",
"rivethead", "antideflation",
"vinegarish", "manganosiderite",
"doubtingness", "ovopyriform",
"Cephalodiscus", "Muscicapa",
"Animalivora", "angina",
"planispheric", "ipomoein",
"cuproiodargyrite", "sandbox",
"scrat", "Munnopsidae",
"shola", "pentafid",
"overstudiousness", "times",
"nonprofession", "appetible",
"valvulotomy", "goladar",
"uniarticular", "oxyterpene",
"unlapsing", "omega",
"trophonema", "seminonflammable",
"circumzenithal", "starer",
"depthwise", "liberatress",
"unleavened", "unrevolting",
"groundneedle", "topline",
"wandoo", "umangite",
"ordinant", "unachievable",
"oversand", "snare",
"avengeful", "unexplicit",
"mustafina", "sonable",
"rehabilitative", "eulogization",
"papery", "technopsychology",
"impressor", "cresylite",
"entame", "transudatory",
"scotale", "pachydermatoid",
"imaginary", "yeat",
"slipped", "stewardship",
"adatom", "cockstone",
"skyshine", "heavenful",
"comparability", "exprobratory",
"dermorhynchous", "parquet",
"cretaceous", "vesperal",
"raphis", "undangered",
"Glecoma", "engrain",
"counteractively", "Zuludom",
"orchiocatabasis", "Auriculariales",
"warriorwise", "extraorganismal",
"overbuilt", "alveolite",
"tetchy", "terrificness",
"widdle", "unpremonished",
"rebilling", "sequestrum",
"equiconvex", "heliocentricism",
"catabaptist", "okonite",
"propheticism", "helminthagogic",
"calycular", "giantly",
"wingable", "golem",
"unprovided", "commandingness",
"greave", "haply",
"doina", "depressingly",
"subdentate", "impairment",
"decidable", "neurotrophic",
"unpredict", "bicorporeal",
"pendulant", "flatman",
"intrabred", "toplike",
"Prosobranchiata", "farrantly",
"toxoplasmosis", "gorilloid",
"dipsomaniacal", "aquiline",
"atlantite", "ascitic",
"perculsive", "prospectiveness",
"saponaceous", "centrifugalization",
"dinical", "infravaginal",
"beadroll", "affaite",
"Helvidian", "tickleproof",
"abstractionism", "enhedge",
"outwealth", "overcontribute",
"coldfinch", "gymnastic",
"Pincian", "Munychian",
"codisjunct", "quad",
"coracomandibular", "phoenicochroite",
"amender", "selectivity",
"putative", "semantician",
"lophotrichic", "Spatangoidea",
"saccharogenic", "inferent",
"Triconodonta", "arrendation",
"sheepskin", "taurocolla",
"bunghole", "Machiavel",
"triakistetrahedral", "dehairer",
"prezygapophysial", "cylindric",
"pneumonalgia", "sleigher",
"emir", "Socraticism",
"licitness", "massedly",
"instructiveness", "sturdied",
"redecrease", "starosta",
"evictor", "orgiastic",
"squdge", "meloplasty",
"Tsonecan", "repealableness",
"swoony", "myesthesia",
"molecule", "autobiographist",
"reciprocation", "refective",
"unobservantness", "tricae",
"ungouged", "floatability",
"Mesua", "fetlocked",
"chordacentrum", "sedentariness",
"various", "laubanite",
"nectopod", "zenick",
"sequentially", "analgic",
"biodynamics", "posttraumatic",
"nummi", "pyroacetic",
"bot", "redescend",
"dispermy", "undiffusive",
"circular", "trillion",
"Uraniidae", "ploration",
"discipular", "potentness",
"sud", "Hu",
"Eryon", "plugger",
"subdrainage", "jharal",
"abscission", "supermarket",
"countergabion", "glacierist",
"lithotresis", "minniebush",
"zanyism", "eucalypteol",
"sterilely", "unrealize",
"unpatched", "hypochondriacism",
"critically", "cheesecutter",
};
}
| 40,590 | 52.550132 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordCount.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples;
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
public class WordCount {
public static class TokenizerMapper
extends Mapper<Object, Text, Text, IntWritable>{
private final static IntWritable one = new IntWritable(1);
private Text word = new Text();
public void map(Object key, Text value, Context context
) throws IOException, InterruptedException {
StringTokenizer itr = new StringTokenizer(value.toString());
while (itr.hasMoreTokens()) {
word.set(itr.nextToken());
context.write(word, one);
}
}
}
public static class IntSumReducer
extends Reducer<Text,IntWritable,Text,IntWritable> {
private IntWritable result = new IntWritable();
public void reduce(Text key, Iterable<IntWritable> values,
Context context
) throws IOException, InterruptedException {
int sum = 0;
for (IntWritable val : values) {
sum += val.get();
}
result.set(sum);
context.write(key, result);
}
}
public static void main(String[] args) throws Exception {
Configuration conf = new Configuration();
String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
if (otherArgs.length < 2) {
System.err.println("Usage: wordcount <in> [<in>...] <out>");
System.exit(2);
}
Job job = Job.getInstance(conf, "word count");
job.setJarByClass(WordCount.class);
job.setMapperClass(TokenizerMapper.class);
job.setCombinerClass(IntSumReducer.class);
job.setReducerClass(IntSumReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
for (int i = 0; i < otherArgs.length - 1; ++i) {
FileInputFormat.addInputPath(job, new Path(otherArgs[i]));
}
FileOutputFormat.setOutputPath(job,
new Path(otherArgs[otherArgs.length - 1]));
System.exit(job.waitForCompletion(true) ? 0 : 1);
}
}
| 3,305 | 35.733333 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Iterator;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.db.DBConfiguration;
import org.apache.hadoop.mapreduce.lib.db.DBInputFormat;
import org.apache.hadoop.mapreduce.lib.db.DBOutputFormat;
import org.apache.hadoop.mapreduce.lib.db.DBWritable;
import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.hsqldb.server.Server;
/**
* This is a demonstrative program, which uses DBInputFormat for reading
* the input data from a database, and DBOutputFormat for writing the data
* to the database.
* <br>
* The Program first creates the necessary tables, populates the input table
* and runs the mapred job.
* <br>
* The input data is a mini access log, with a <code><url,referrer,time>
 * </code> schema. The output is the number of pageviews of each url in the log,
* having the schema <code><url,pageview></code>.
*
* When called with no arguments the program starts a local HSQLDB server, and
* uses this database for storing/retrieving the data.
* <br>
* This program requires some additional configuration relating to HSQLDB.
 * The hsqldb jar should be added to the classpath:
* <br>
* <code>export HADOOP_CLASSPATH=share/hadoop/mapreduce/lib-examples/hsqldb-2.0.0.jar</code>
* <br>
* And the hsqldb jar should be included with the <code>-libjars</code>
* argument when executing it with hadoop:
* <br>
* <code>-libjars share/hadoop/mapreduce/lib-examples/hsqldb-2.0.0.jar</code>
*/
public class DBCountPageView extends Configured implements Tool {
private static final Log LOG = LogFactory.getLog(DBCountPageView.class);
private Connection connection;
private boolean initialized = false;
private static final String[] AccessFieldNames = {"url", "referrer", "time"};
private static final String[] PageviewFieldNames = {"url", "pageview"};
private static final String DB_URL =
"jdbc:hsqldb:hsql://localhost/URLAccess";
private static final String DRIVER_CLASS = "org.hsqldb.jdbc.JDBCDriver";
private Server server;
private void startHsqldbServer() {
server = new Server();
server.setDatabasePath(0,
System.getProperty("test.build.data", "/tmp") + "/URLAccess");
server.setDatabaseName(0, "URLAccess");
server.start();
}
private void createConnection(String driverClassName
, String url) throws Exception {
Class.forName(driverClassName);
connection = DriverManager.getConnection(url);
connection.setAutoCommit(false);
}
private void shutdown() {
try {
connection.commit();
connection.close();
}catch (Throwable ex) {
LOG.warn("Exception occurred while closing connection :"
+ StringUtils.stringifyException(ex));
} finally {
try {
if(server != null) {
server.shutdown();
}
}catch (Throwable ex) {
LOG.warn("Exception occurred while shutting down HSQLDB :"
+ StringUtils.stringifyException(ex));
}
}
}
private void initialize(String driverClassName, String url)
throws Exception {
if(!this.initialized) {
if(driverClassName.equals(DRIVER_CLASS)) {
startHsqldbServer();
}
createConnection(driverClassName, url);
dropTables();
createTables();
populateAccess();
this.initialized = true;
}
}
private void dropTables() {
String dropAccess = "DROP TABLE Access";
String dropPageview = "DROP TABLE Pageview";
Statement st = null;
try {
st = connection.createStatement();
st.executeUpdate(dropAccess);
st.executeUpdate(dropPageview);
connection.commit();
st.close();
}catch (SQLException ex) {
try { if (st != null) { st.close(); } } catch (Exception e) {}
}
}
private void createTables() throws SQLException {
String createAccess =
"CREATE TABLE " +
"Access(url VARCHAR(100) NOT NULL," +
" referrer VARCHAR(100)," +
" time BIGINT NOT NULL, " +
" PRIMARY KEY (url, time))";
String createPageview =
"CREATE TABLE " +
"Pageview(url VARCHAR(100) NOT NULL," +
" pageview BIGINT NOT NULL, " +
" PRIMARY KEY (url))";
Statement st = connection.createStatement();
try {
st.executeUpdate(createAccess);
st.executeUpdate(createPageview);
connection.commit();
} finally {
st.close();
}
}
/**
* Populates the Access table with generated records.
*/
private void populateAccess() throws SQLException {
PreparedStatement statement = null ;
try {
statement = connection.prepareStatement(
"INSERT INTO Access(url, referrer, time)" +
" VALUES (?, ?, ?)");
Random random = new Random();
int time = random.nextInt(50) + 50;
final int PROBABILITY_PRECISION = 100; // 1 / 100
final int NEW_PAGE_PROBABILITY = 15; // 15 / 100
//Pages in the site :
String[] pages = {"/a", "/b", "/c", "/d", "/e",
"/f", "/g", "/h", "/i", "/j"};
//linkMatrix[i] is the array of pages(indexes) that page_i links to.
int[][] linkMatrix = {{1,5,7}, {0,7,4,6,}, {0,1,7,8},
{0,2,4,6,7,9}, {0,1}, {0,3,5,9}, {0}, {0,1,3}, {0,2,6}, {0,2,6}};
//a mini model of user browsing a la pagerank
int currentPage = random.nextInt(pages.length);
String referrer = null;
for(int i=0; i<time; i++) {
statement.setString(1, pages[currentPage]);
statement.setString(2, referrer);
statement.setLong(3, i);
statement.execute();
int action = random.nextInt(PROBABILITY_PRECISION);
// go to a new page with probability
// NEW_PAGE_PROBABILITY / PROBABILITY_PRECISION
if(action < NEW_PAGE_PROBABILITY) {
currentPage = random.nextInt(pages.length); // a random page
referrer = null;
}
else {
referrer = pages[currentPage];
action = random.nextInt(linkMatrix[currentPage].length);
currentPage = linkMatrix[currentPage][action];
}
}
connection.commit();
}catch (SQLException ex) {
connection.rollback();
throw ex;
} finally {
if(statement != null) {
statement.close();
}
}
}
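  /*
   * Example step of the browsing model above: if the walk is currently at
   * page index 0 ("/a"), then with probability 15/100 it jumps to a random
   * page with a null referrer; otherwise it follows one of the links in
   * linkMatrix[0] = {1, 5, 7}, i.e. moves to "/b", "/f" or "/h" with "/a"
   * recorded as the referrer.
   */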
/**Verifies the results are correct */
private boolean verify() throws SQLException {
//check total num pageview
String countAccessQuery = "SELECT COUNT(*) FROM Access";
String sumPageviewQuery = "SELECT SUM(pageview) FROM Pageview";
Statement st = null;
ResultSet rs = null;
try {
st = connection.createStatement();
rs = st.executeQuery(countAccessQuery);
rs.next();
long totalPageview = rs.getLong(1);
rs = st.executeQuery(sumPageviewQuery);
rs.next();
long sumPageview = rs.getLong(1);
LOG.info("totalPageview=" + totalPageview);
LOG.info("sumPageview=" + sumPageview);
return totalPageview == sumPageview && totalPageview != 0;
}finally {
if(st != null)
st.close();
if(rs != null)
rs.close();
}
}
/** Holds a <url, referrer, time > tuple */
static class AccessRecord implements Writable, DBWritable {
String url;
String referrer;
long time;
@Override
public void readFields(DataInput in) throws IOException {
this.url = Text.readString(in);
this.referrer = Text.readString(in);
this.time = in.readLong();
}
@Override
public void write(DataOutput out) throws IOException {
Text.writeString(out, url);
Text.writeString(out, referrer);
out.writeLong(time);
}
@Override
public void readFields(ResultSet resultSet) throws SQLException {
this.url = resultSet.getString(1);
this.referrer = resultSet.getString(2);
this.time = resultSet.getLong(3);
}
@Override
public void write(PreparedStatement statement) throws SQLException {
statement.setString(1, url);
statement.setString(2, referrer);
statement.setLong(3, time);
}
}
/** Holds a <url, pageview > tuple */
static class PageviewRecord implements Writable, DBWritable {
String url;
long pageview;
public PageviewRecord(String url, long pageview) {
this.url = url;
this.pageview = pageview;
}
@Override
public void readFields(DataInput in) throws IOException {
this.url = Text.readString(in);
this.pageview = in.readLong();
}
@Override
public void write(DataOutput out) throws IOException {
Text.writeString(out, url);
out.writeLong(pageview);
}
@Override
public void readFields(ResultSet resultSet) throws SQLException {
this.url = resultSet.getString(1);
this.pageview = resultSet.getLong(2);
}
@Override
public void write(PreparedStatement statement) throws SQLException {
statement.setString(1, url);
statement.setLong(2, pageview);
}
@Override
public String toString() {
return url + " " + pageview;
}
}
/**
* Mapper extracts URLs from the AccessRecord (tuples from db),
* and emits a <url,1> pair for each access record.
*/
static class PageviewMapper extends
Mapper<LongWritable, AccessRecord, Text, LongWritable> {
LongWritable ONE = new LongWritable(1L);
@Override
public void map(LongWritable key, AccessRecord value, Context context)
throws IOException, InterruptedException {
Text oKey = new Text(value.url);
context.write(oKey, ONE);
}
}
/**
* Reducer sums up the pageviews and emits a PageviewRecord,
* which will correspond to one tuple in the db.
*/
static class PageviewReducer extends
Reducer<Text, LongWritable, PageviewRecord, NullWritable> {
NullWritable n = NullWritable.get();
@Override
public void reduce(Text key, Iterable<LongWritable> values,
Context context) throws IOException, InterruptedException {
long sum = 0L;
for(LongWritable value: values) {
sum += value.get();
}
context.write(new PageviewRecord(key.toString(), sum), n);
}
}
@Override
//Usage DBCountPageView [driverClass dburl]
public int run(String[] args) throws Exception {
String driverClassName = DRIVER_CLASS;
String url = DB_URL;
if(args.length > 1) {
driverClassName = args[0];
url = args[1];
}
initialize(driverClassName, url);
Configuration conf = getConf();
DBConfiguration.configureDB(conf, driverClassName, url);
Job job = new Job(conf);
job.setJobName("Count Pageviews of URLs");
job.setJarByClass(DBCountPageView.class);
job.setMapperClass(PageviewMapper.class);
job.setCombinerClass(LongSumReducer.class);
job.setReducerClass(PageviewReducer.class);
DBInputFormat.setInput(job, AccessRecord.class, "Access"
, null, "url", AccessFieldNames);
DBOutputFormat.setOutput(job, "Pageview", PageviewFieldNames);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(LongWritable.class);
job.setOutputKeyClass(PageviewRecord.class);
job.setOutputValueClass(NullWritable.class);
int ret;
try {
ret = job.waitForCompletion(true) ? 0 : 1;
boolean correct = verify();
if(!correct) {
throw new RuntimeException("Evaluation was not correct!");
}
} finally {
shutdown();
}
return ret;
}
public static void main(String[] args) throws Exception {
int ret = ToolRunner.run(new DBCountPageView(), args);
System.exit(ret);
}
}
| 13,491 | 29.944954 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.dancing;
import java.io.*;
import java.util.List;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.*;
import com.google.common.base.Charsets;
/**
* Launch a distributed pentomino solver.
* It generates a complete list of prefixes of length N with each unique prefix
* as a separate line. A prefix is a sequence of N integers that denote the
 * index of the row that is chosen for each column in order. Note that the
 * next column is heuristically chosen by the solver, so it is dependent on
 * the previous choice. That file is given as the input to
* map/reduce. The output key/value are the move prefix/solution as Text/Text.
*/
public class DistributedPentomino extends Configured implements Tool {
private static final int PENT_DEPTH = 5;
private static final int PENT_WIDTH = 9;
private static final int PENT_HEIGHT = 10;
private static final int DEFAULT_MAPS = 2000;
/**
* Each map takes a line, which represents a prefix move and finds all of
* the solutions that start with that prefix. The output is the prefix as
* the key and the solution as the value.
*/
public static class PentMap extends
Mapper<WritableComparable<?>, Text, Text, Text> {
private int width;
private int height;
private int depth;
private Pentomino pent;
private Text prefixString;
private Context context;
/**
* For each solution, generate the prefix and a string representation
* of the solution. The solution starts with a newline, so that the output
* looks like:
* <prefix>,
* <solution>
*
*/
class SolutionCatcher
implements DancingLinks.SolutionAcceptor<Pentomino.ColumnName> {
public void solution(List<List<Pentomino.ColumnName>> answer) {
String board = Pentomino.stringifySolution(width, height, answer);
try {
context.write(prefixString, new Text("\n" + board));
context.getCounter(pent.getCategory(answer)).increment(1);
} catch (IOException e) {
System.err.println(StringUtils.stringifyException(e));
} catch (InterruptedException ie) {
System.err.println(StringUtils.stringifyException(ie));
}
}
}
/**
* Break the prefix string into moves (a sequence of integer row ids that
* will be selected for each column in order). Find all solutions with
* that prefix.
*/
public void map(WritableComparable<?> key, Text value,Context context)
throws IOException {
prefixString = value;
StringTokenizer itr = new StringTokenizer(prefixString.toString(), ",");
int[] prefix = new int[depth];
int idx = 0;
while (itr.hasMoreTokens()) {
String num = itr.nextToken();
prefix[idx++] = Integer.parseInt(num);
}
pent.solve(prefix);
}
@Override
public void setup(Context context) {
this.context = context;
Configuration conf = context.getConfiguration();
depth = conf.getInt(Pentomino.DEPTH, PENT_DEPTH);
width = conf.getInt(Pentomino.WIDTH, PENT_WIDTH);
height = conf.getInt(Pentomino.HEIGHT, PENT_HEIGHT);
pent = (Pentomino)
ReflectionUtils.newInstance(conf.getClass(Pentomino.CLASS,
OneSidedPentomino.class),
conf);
pent.initialize(width, height);
pent.setPrinter(new SolutionCatcher());
}
}
/**
* Create the input file with all of the possible combinations of the
* given depth.
* @param fs the filesystem to write into
* @param dir the directory to write the input file into
* @param pent the puzzle
* @param depth the depth to explore when generating prefixes
*/
private static long createInputDirectory(FileSystem fs,
Path dir,
Pentomino pent,
int depth
) throws IOException {
fs.mkdirs(dir);
List<int[]> splits = pent.getSplits(depth);
Path input = new Path(dir, "part1");
PrintWriter file =
new PrintWriter(new OutputStreamWriter(new BufferedOutputStream
(fs.create(input), 64*1024), Charsets.UTF_8));
for(int[] prefix: splits) {
for(int i=0; i < prefix.length; ++i) {
if (i != 0) {
file.print(',');
}
file.print(prefix[i]);
}
file.print('\n');
}
file.close();
return fs.getFileStatus(input).getLen();
}
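  /*
   * Illustrative contents of the generated "part1" file for depth = 2 (the
   * concrete numbers depend on the puzzle, so these lines only show the
   * format -- one comma-separated prefix of row choices per line):
   *
   *   0,0
   *   0,1
   *   0,2
   *   1,0
   *
   * Each line becomes the input value of one map() call in PentMap, which
   * replays those row choices and searches the remaining subtree.
   */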
/**
* Launch the solver on 9x10 board and the one sided pentominos.
* This takes about 2.5 hours on 20 nodes with 2 cpus/node.
* Splits the job into 2000 maps and 1 reduce.
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(),
new DistributedPentomino(), args);
System.exit(res);
}
public int run(String[] args) throws Exception {
Configuration conf = getConf();
if (args.length == 0) {
System.out.println("Usage: pentomino <output> [-depth #] [-height #] [-width #]");
ToolRunner.printGenericCommandUsage(System.out);
return 2;
}
// check for passed parameters, otherwise use defaults
int width = conf.getInt(Pentomino.WIDTH, PENT_WIDTH);
int height = conf.getInt(Pentomino.HEIGHT, PENT_HEIGHT);
int depth = conf.getInt(Pentomino.DEPTH, PENT_DEPTH);
for (int i = 0; i < args.length; i++) {
if (args[i].equalsIgnoreCase("-depth")) {
depth = Integer.parseInt(args[++i].trim());
} else if (args[i].equalsIgnoreCase("-height")) {
height = Integer.parseInt(args[++i].trim());
} else if (args[i].equalsIgnoreCase("-width") ) {
width = Integer.parseInt(args[++i].trim());
}
}
// now set the values within conf for M/R tasks to read, this
// will ensure values are set preventing MAPREDUCE-4678
conf.setInt(Pentomino.WIDTH, width);
conf.setInt(Pentomino.HEIGHT, height);
conf.setInt(Pentomino.DEPTH, depth);
Class<? extends Pentomino> pentClass = conf.getClass(Pentomino.CLASS,
OneSidedPentomino.class, Pentomino.class);
int numMaps = conf.getInt(MRJobConfig.NUM_MAPS, DEFAULT_MAPS);
Path output = new Path(args[0]);
Path input = new Path(output + "_input");
FileSystem fileSys = FileSystem.get(conf);
try {
Job job = Job.getInstance(conf);
FileInputFormat.setInputPaths(job, input);
FileOutputFormat.setOutputPath(job, output);
job.setJarByClass(PentMap.class);
job.setJobName("dancingElephant");
Pentomino pent = ReflectionUtils.newInstance(pentClass, conf);
pent.initialize(width, height);
long inputSize = createInputDirectory(fileSys, input, pent, depth);
// for forcing the number of maps
FileInputFormat.setMaxInputSplitSize(job, (inputSize/numMaps));
// the keys are the prefix strings
job.setOutputKeyClass(Text.class);
// the values are puzzle solutions
job.setOutputValueClass(Text.class);
job.setMapperClass(PentMap.class);
job.setReducerClass(Reducer.class);
job.setNumReduceTasks(1);
return (job.waitForCompletion(true) ? 0 : 1);
} finally {
fileSys.delete(input, true);
}
}
}
| 8,685 | 36.930131 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Pentomino.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.dancing;
import java.util.*;
public class Pentomino {
public static final String DEPTH = "mapreduce.pentomino.depth";
public static final String WIDTH = "mapreduce.pentomino.width";
public static final String HEIGHT = "mapreduce.pentomino.height";
public static final String CLASS = "mapreduce.pentomino.class";
/**
   * This interface is just a marker for what types I expect to get back
* as column names.
*/
protected static interface ColumnName {
// NOTHING
}
/**
* Maintain information about a puzzle piece.
*/
protected static class Piece implements ColumnName {
private String name;
private boolean [][] shape;
private int[] rotations;
private boolean flippable;
public Piece(String name, String shape,
boolean flippable, int[] rotations) {
this.name = name;
this.rotations = rotations;
this.flippable = flippable;
StringTokenizer parser = new StringTokenizer(shape, "/");
List<boolean[]> lines = new ArrayList<boolean[]>();
while (parser.hasMoreTokens()) {
String token = parser.nextToken();
boolean[] line = new boolean[token.length()];
for(int i=0; i < line.length; ++i) {
line[i] = token.charAt(i) == 'x';
}
lines.add(line);
}
this.shape = new boolean[lines.size()][];
for(int i=0 ; i < lines.size(); i++) {
this.shape[i] = lines.get(i);
}
}
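    /*
     * Example parse (for illustration only): "x" marks a filled cell and
     * "/" separates rows, so the "x" pentomino string " x /xxx/ x " becomes
     *
     *   shape = { {false, true,  false},
     *             {true,  true,  true },
     *             {false, true,  false} };
     */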
public String getName() {
return name;
}
public int[] getRotations() {
return rotations.clone();
}
public boolean getFlippable() {
return flippable;
}
private int doFlip(boolean flip, int x, int max) {
if (flip) {
return max - x - 1;
} else {
return x;
}
}
public boolean[][] getShape(boolean flip, int rotate) {
boolean [][] result;
if (rotate % 2 == 0) {
int height = shape.length;
int width = shape[0].length;
result = new boolean[height][];
boolean flipX = rotate == 2;
boolean flipY = flip ^ (rotate == 2);
for (int y = 0; y < height; ++y) {
result[y] = new boolean[width];
for (int x=0; x < width; ++x) {
result[y][x] = shape[doFlip(flipY, y, height)]
[doFlip(flipX, x, width)];
}
}
} else {
int height = shape[0].length;
int width = shape.length;
result = new boolean[height][];
boolean flipX = rotate == 3;
boolean flipY = flip ^ (rotate == 1);
for (int y = 0; y < height; ++y) {
result[y] = new boolean[width];
for (int x=0; x < width; ++x) {
result[y][x] = shape[doFlip(flipX, x, width)]
[doFlip(flipY, y, height)];
}
}
}
return result;
}
}
/**
* A point in the puzzle board. This represents a placement of a piece into
* a given point on the board.
*/
static class Point implements ColumnName {
int x;
int y;
Point(int x, int y) {
this.x = x;
this.y = y;
}
}
/**
* Convert a solution to the puzzle returned by the model into a string
* that represents the placement of the pieces onto the board.
* @param width the width of the puzzle board
* @param height the height of the puzzle board
* @param solution the list of column names that were selected in the model
   * @return a string representation of the completed puzzle board
*/
public static String stringifySolution(int width, int height,
List<List<ColumnName>> solution) {
String[][] picture = new String[height][width];
StringBuffer result = new StringBuffer();
// for each piece placement...
for(List<ColumnName> row: solution) {
// go through to find which piece was placed
Piece piece = null;
for(ColumnName item: row) {
if (item instanceof Piece) {
piece = (Piece) item;
break;
}
}
// for each point where the piece was placed, mark it with the piece name
for(ColumnName item: row) {
if (item instanceof Point) {
Point p = (Point) item;
picture[p.y][p.x] = piece.getName();
}
}
}
// put the string together
for(int y=0; y < picture.length; ++y) {
for (int x=0; x < picture[y].length; ++x) {
result.append(picture[y][x]);
}
result.append("\n");
}
return result.toString();
}
public enum SolutionCategory {UPPER_LEFT, MID_X, MID_Y, CENTER}
/**
* Find whether the solution has the x in the upper left quadrant, the
* x-midline, the y-midline or in the center.
* @param names the solution to check
   * @return the category of the solution
*/
public SolutionCategory getCategory(List<List<ColumnName>> names) {
Piece xPiece = null;
// find the "x" piece
for(Piece p: pieces) {
if ("x".equals(p.name)) {
xPiece = p;
break;
}
}
// find the row containing the "x"
for(List<ColumnName> row: names) {
if (row.contains(xPiece)) {
// figure out where the "x" is located
int low_x = width;
int high_x = 0;
int low_y = height;
int high_y = 0;
for(ColumnName col: row) {
if (col instanceof Point) {
int x = ((Point) col).x;
int y = ((Point) col).y;
if (x < low_x) {
low_x = x;
}
if (x > high_x) {
high_x = x;
}
if (y < low_y) {
low_y = y;
}
if (y > high_y) {
high_y = y;
}
}
}
boolean mid_x = (low_x + high_x == width - 1);
boolean mid_y = (low_y + high_y == height - 1);
if (mid_x && mid_y) {
return SolutionCategory.CENTER;
} else if (mid_x) {
return SolutionCategory.MID_X;
} else if (mid_y) {
return SolutionCategory.MID_Y;
}
break;
}
}
return SolutionCategory.UPPER_LEFT;
}
/**
* A solution printer that just writes the solution to stdout.
*/
private static class SolutionPrinter
implements DancingLinks.SolutionAcceptor<ColumnName> {
int width;
int height;
public SolutionPrinter(int width, int height) {
this.width = width;
this.height = height;
}
public void solution(List<List<ColumnName>> names) {
System.out.println(stringifySolution(width, height, names));
}
}
protected int width;
protected int height;
protected List<Piece> pieces = new ArrayList<Piece>();
/**
* Is the piece fixed under rotation?
*/
protected static final int [] oneRotation = new int[]{0};
/**
* Is the piece identical if rotated 180 degrees?
*/
protected static final int [] twoRotations = new int[]{0,1};
/**
* Are all 4 rotations unique?
*/
protected static final int [] fourRotations = new int[]{0,1,2,3};
/**
* Fill in the pieces list.
*/
protected void initializePieces() {
pieces.add(new Piece("x", " x /xxx/ x ", false, oneRotation));
pieces.add(new Piece("v", "x /x /xxx", false, fourRotations));
pieces.add(new Piece("t", "xxx/ x / x ", false, fourRotations));
pieces.add(new Piece("w", " x/ xx/xx ", false, fourRotations));
pieces.add(new Piece("u", "x x/xxx", false, fourRotations));
pieces.add(new Piece("i", "xxxxx", false, twoRotations));
pieces.add(new Piece("f", " xx/xx / x ", true, fourRotations));
pieces.add(new Piece("p", "xx/xx/x ", true, fourRotations));
pieces.add(new Piece("z", "xx / x / xx", true, twoRotations));
pieces.add(new Piece("n", "xx / xxx", true, fourRotations));
pieces.add(new Piece("y", " x /xxxx", true, fourRotations));
pieces.add(new Piece("l", " x/xxxx", true, fourRotations));
}
/**
* Is the middle of piece on the upper/left side of the board with
* a given offset and size of the piece? This only checks in one
* dimension.
* @param offset the offset of the piece
* @param shapeSize the size of the piece
* @param board the size of the board
* @return is it in the upper/left?
*/
private static boolean isSide(int offset, int shapeSize, int board) {
return 2*offset + shapeSize <= board;
}
/**
* For a given piece, generate all of the potential placements and add them
* as rows to the model.
* @param dancer the problem model
* @param piece the piece we are trying to place
* @param width the width of the board
* @param height the height of the board
* @param flip is the piece flipped over?
   * @param row a workspace the length of each row in the table
   * @param upperLeft is the piece constrained to the upper left of the board?
   * this is used on a single piece to eliminate most of the trivial
   * rotations of the solution.
*/
private static void generateRows(DancingLinks dancer,
Piece piece,
int width,
int height,
boolean flip,
boolean[] row,
boolean upperLeft) {
// for each rotation
int[] rotations = piece.getRotations();
for(int rotIndex = 0; rotIndex < rotations.length; ++rotIndex) {
// get the shape
boolean[][] shape = piece.getShape(flip, rotations[rotIndex]);
// find all of the valid offsets
for(int x=0; x < width; ++x) {
for(int y=0; y < height; ++y) {
if (y + shape.length <= height && x + shape[0].length <= width &&
(!upperLeft ||
(isSide(x, shape[0].length, width) &&
isSide(y, shape.length, height)))) {
// clear the columns related to the points on the board
for(int idx=0; idx < width * height; ++idx) {
row[idx] = false;
}
// mark the shape
for(int subY=0; subY < shape.length; ++subY) {
for(int subX=0; subX < shape[0].length; ++subX) {
row[(y + subY) * width + x + subX] = shape[subY][subX];
}
}
dancer.addRow(row);
}
}
}
}
}
private DancingLinks<ColumnName> dancer = new DancingLinks<ColumnName>();
private DancingLinks.SolutionAcceptor<ColumnName> printer;
{
initializePieces();
}
/**
* Create the model for a given pentomino set of pieces and board size.
* @param width the width of the board in squares
* @param height the height of the board in squares
*/
public Pentomino(int width, int height) {
initialize(width, height);
}
/**
* Create the object without initialization.
*/
public Pentomino() {
}
void initialize(int width, int height) {
this.width = width;
this.height = height;
for(int y=0; y < height; ++y) {
for(int x=0; x < width; ++x) {
dancer.addColumn(new Point(x,y));
}
}
int pieceBase = dancer.getNumberColumns();
for(Piece p: pieces) {
dancer.addColumn(p);
}
boolean[] row = new boolean[dancer.getNumberColumns()];
for(int idx = 0; idx < pieces.size(); ++idx) {
Piece piece = pieces.get(idx);
row[idx + pieceBase] = true;
generateRows(dancer, piece, width, height, false, row, idx == 0);
if (piece.getFlippable()) {
generateRows(dancer, piece, width, height, true, row, idx == 0);
}
row[idx + pieceBase] = false;
}
printer = new SolutionPrinter(width, height);
}
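  /*
   * Rough shape of the resulting model, e.g. for the 6x10 board solved in
   * main(): the first width*height = 60 columns are the board Points (added
   * row-major above), followed by one column per piece (12 here, 18 for
   * OneSidedPentomino). Every generated row therefore holds exactly six
   * true entries: the five covered Points plus the piece's own column.
   */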
/**
* Generate a list of prefixes to a given depth
* @param depth the length of each prefix
* @return a list of arrays of ints, which are potential prefixes
*/
public List<int[]> getSplits(int depth) {
return dancer.split(depth);
}
/**
* Find all of the solutions that start with the given prefix. The printer
* is given each solution as it is found.
   * @param split a list of row indexes that should be chosen for each row
* in order
* @return the number of solutions found
*/
public int solve(int[] split) {
return dancer.solve(split, printer);
}
/**
* Find all of the solutions to the puzzle.
* @return the number of solutions found
*/
public int solve() {
return dancer.solve(printer);
}
/**
* Set the printer for the puzzle.
* @param printer A call-back object that is given each solution as it is
* found.
*/
public void setPrinter(DancingLinks.SolutionAcceptor<ColumnName> printer) {
this.printer = printer;
}
/**
* Solve the 6x10 pentomino puzzle.
*/
public static void main(String[] args) {
int width = 6;
int height = 10;
Pentomino model = new Pentomino(width, height);
List splits = model.getSplits(2);
for(Iterator splitItr=splits.iterator(); splitItr.hasNext(); ) {
int[] choices = (int[]) splitItr.next();
System.out.print("split:");
for(int i=0; i < choices.length; ++i) {
System.out.print(" " + choices[i]);
}
System.out.println();
System.out.println(model.solve(choices) + " solutions found.");
}
}
}
| 14,342 | 30.523077 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Sudoku.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.dancing;
import java.io.*;
import java.util.*;
import com.google.common.base.Charsets;
/**
* This class uses the dancing links algorithm from Knuth to solve sudoku
* puzzles. It has solved 42x42 puzzles in 1.02 seconds.
*/
public class Sudoku {
/**
* The preset values in the board
* board[y][x] is the value at x,y with -1 = any
*/
private int[][] board;
/**
* The size of the board
*/
private int size;
/**
* The size of the sub-squares in cells across
*/
private int squareXSize;
/**
   * The size of the sub-squares in cells up and down
*/
private int squareYSize;
/**
* This interface is a marker class for the columns created for the
* Sudoku solver.
*/
protected static interface ColumnName {
// NOTHING
}
/**
* A string containing a representation of the solution.
* @param size the size of the board
* @param solution a list of list of column names
* @return a string of the solution matrix
*/
static String stringifySolution(int size, List<List<ColumnName>> solution) {
int[][] picture = new int[size][size];
StringBuffer result = new StringBuffer();
// go through the rows selected in the model and build a picture of the
// solution.
for(List<ColumnName> row: solution) {
int x = -1;
int y = -1;
int num = -1;
for(ColumnName item: row) {
if (item instanceof ColumnConstraint) {
x = ((ColumnConstraint) item).column;
num = ((ColumnConstraint) item).num;
} else if (item instanceof RowConstraint) {
y = ((RowConstraint) item).row;
}
}
picture[y][x] = num;
}
// build the string
for(int y=0; y < size; ++y) {
for (int x=0; x < size; ++x) {
result.append(picture[y][x]);
result.append(" ");
}
result.append("\n");
}
return result.toString();
}
/**
* An acceptor to get the solutions to the puzzle as they are generated and
* print them to the console.
*/
private static class SolutionPrinter
implements DancingLinks.SolutionAcceptor<ColumnName> {
int size;
public SolutionPrinter(int size) {
this.size = size;
}
/**
* A debugging aid that just prints the raw information about the
* dancing link columns that were selected for each row.
* @param solution a list of list of column names
*/
void rawWrite(List solution) {
for (Iterator itr=solution.iterator(); itr.hasNext(); ) {
Iterator subitr = ((List) itr.next()).iterator();
while (subitr.hasNext()) {
System.out.print(subitr.next().toString() + " ");
}
System.out.println();
}
}
public void solution(List<List<ColumnName>> names) {
System.out.println(stringifySolution(size, names));
}
}
/**
* Set up a puzzle board to the given size.
* Boards may be asymmetric, but the squares will always be divided to be
* more cells wide than they are tall. For example, a 6x6 puzzle will make
* sub-squares that are 3x2 (3 cells wide, 2 cells tall). Clearly that means
* the board is made up of 2x3 sub-squares.
* @param stream The input stream to read the data from
*/
public Sudoku(InputStream stream) throws IOException {
BufferedReader file = new BufferedReader(
new InputStreamReader(stream, Charsets.UTF_8));
String line = file.readLine();
List<int[]> result = new ArrayList<int[]>();
while (line != null) {
StringTokenizer tokenizer = new StringTokenizer(line);
int size = tokenizer.countTokens();
int[] col = new int[size];
int y = 0;
while(tokenizer.hasMoreElements()) {
String word = tokenizer.nextToken();
if ("?".equals(word)) {
col[y] = - 1;
} else {
col[y] = Integer.parseInt(word);
}
y += 1;
}
result.add(col);
line = file.readLine();
}
size = result.size();
board = result.toArray(new int [size][]);
squareYSize = (int) Math.sqrt(size);
squareXSize = size / squareYSize;
file.close();
}
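  /*
   * Hypothetical 4x4 input, shown only to illustrate the expected format
   * (whitespace-separated cells, '?' for an unknown value):
   *
   *   ? 2 ? 4
   *   ? ? 1 ?
   *   4 ? ? 2
   *   ? 3 ? ?
   *
   * With size = 4 the code above derives squareYSize = 2 and
   * squareXSize = 2, i.e. the board is divided into 2x2 sub-squares.
   */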
/**
* A constraint that each number can appear just once in a column.
*/
static private class ColumnConstraint implements ColumnName {
ColumnConstraint(int num, int column) {
this.num = num;
this.column = column;
}
int num;
int column;
public String toString() {
return num + " in column " + column;
}
}
/**
* A constraint that each number can appear just once in a row.
*/
static private class RowConstraint implements ColumnName {
RowConstraint(int num, int row) {
this.num = num;
this.row = row;
}
int num;
int row;
public String toString() {
return num + " in row " + row;
}
}
/**
* A constraint that each number can appear just once in a square.
*/
static private class SquareConstraint implements ColumnName {
SquareConstraint(int num, int x, int y) {
this.num = num;
this.x = x;
this.y = y;
}
int num;
int x;
int y;
public String toString() {
return num + " in square " + x + "," + y;
}
}
/**
* A constraint that each cell can only be used once.
*/
static private class CellConstraint implements ColumnName {
CellConstraint(int x, int y) {
this.x = x;
this.y = y;
}
int x;
int y;
public String toString() {
return "cell " + x + "," + y;
}
}
/**
* Create a row that places num in cell x, y.
* @param rowValues a scratch pad to mark the bits needed
* @param x the horizontal offset of the cell
* @param y the vertical offset of the cell
* @param num the number to place
* @return a bitvector of the columns selected
*/
private boolean[] generateRow(boolean[] rowValues, int x, int y, int num) {
// clear the scratch array
for(int i=0; i < rowValues.length; ++i) {
rowValues[i] = false;
}
// find the square coordinates
int xBox = x / squareXSize;
int yBox = y / squareYSize;
// mark the column
rowValues[x*size + num - 1] = true;
// mark the row
rowValues[size*size + y*size + num - 1] = true;
// mark the square
rowValues[2*size*size + (xBox*squareXSize + yBox)*size + num - 1] = true;
// mark the cell
rowValues[3*size*size + size*x + y] = true;
return rowValues;
}
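  /*
   * Worked example, assuming size = 4 (so squareXSize = squareYSize = 2):
   * placing num = 3 at x = 1, y = 2 sets exactly four of the
   * 4*size*size = 64 bits:
   *
   *   column constraint: 1*4 + 3 - 1              = 6
   *   row constraint:    16 + 2*4 + 3 - 1         = 26
   *   square constraint: 32 + (0*2 + 1)*4 + 3 - 1 = 38
   *   cell constraint:   48 + 4*1 + 2             = 54
   */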
private DancingLinks<ColumnName> makeModel() {
DancingLinks<ColumnName> model = new DancingLinks<ColumnName>();
// create all of the columns constraints
for(int x=0; x < size; ++x) {
for(int num=1; num <= size; ++num) {
model.addColumn(new ColumnConstraint(num, x));
}
}
// create all of the row constraints
for(int y=0; y < size; ++y) {
for(int num=1; num <= size; ++num) {
model.addColumn(new RowConstraint(num, y));
}
}
// create the square constraints
for(int x=0; x < squareYSize; ++x) {
for(int y=0; y < squareXSize; ++y) {
for(int num=1; num <= size; ++num) {
model.addColumn(new SquareConstraint(num, x, y));
}
}
}
// create the cell constraints
for(int x=0; x < size; ++x) {
for(int y=0; y < size; ++y) {
model.addColumn(new CellConstraint(x, y));
}
}
boolean[] rowValues = new boolean[size*size*4];
for(int x=0; x < size; ++x) {
for(int y=0; y < size; ++y) {
if (board[y][x] == -1) {
// try each possible value in the cell
for(int num=1; num <= size; ++num) {
model.addRow(generateRow(rowValues, x, y, num));
}
} else {
// put the given cell in place
model.addRow(generateRow(rowValues, x, y, board[y][x]));
}
}
}
return model;
}
public void solve() {
DancingLinks<ColumnName> model = makeModel();
int results = model.solve(new SolutionPrinter(size));
System.out.println("Found " + results + " solutions");
}
/**
* Solves a set of sudoku puzzles.
* @param args a list of puzzle filenames to solve
*/
public static void main(String[] args) throws IOException {
if (args.length == 0) {
System.out.println("Include a puzzle on the command line.");
}
for(int i=0; i < args.length; ++i) {
Sudoku problem = new Sudoku(new FileInputStream(args[i]));
System.out.println("Solving " + args[i]);
problem.solve();
}
}
}
| 9,369 | 28.099379 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/OneSidedPentomino.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.dancing;
/**
* Of the "normal" 12 pentominos, 6 of them have distinct shapes when flipped.
* This class includes both variants of the "flippable" shapes and the
* unflippable shapes for a total of 18 pieces. Clearly, the boards must have
 * 18*5=90 boxes to hold all of the pieces.
*/
public class OneSidedPentomino extends Pentomino {
public OneSidedPentomino() {}
public OneSidedPentomino(int width, int height) {
super(width, height);
}
/**
* Define the one sided pieces. The flipped pieces have the same name with
* a capital letter.
*/
protected void initializePieces() {
pieces.add(new Piece("x", " x /xxx/ x ", false, oneRotation));
pieces.add(new Piece("v", "x /x /xxx", false, fourRotations));
pieces.add(new Piece("t", "xxx/ x / x ", false, fourRotations));
pieces.add(new Piece("w", " x/ xx/xx ", false, fourRotations));
pieces.add(new Piece("u", "x x/xxx", false, fourRotations));
pieces.add(new Piece("i", "xxxxx", false, twoRotations));
pieces.add(new Piece("f", " xx/xx / x ", false, fourRotations));
pieces.add(new Piece("p", "xx/xx/x ", false, fourRotations));
pieces.add(new Piece("z", "xx / x / xx", false, twoRotations));
pieces.add(new Piece("n", "xx / xxx", false, fourRotations));
pieces.add(new Piece("y", " x /xxxx", false, fourRotations));
pieces.add(new Piece("l", " x/xxxx", false, fourRotations));
pieces.add(new Piece("F", "xx / xx/ x ", false, fourRotations));
pieces.add(new Piece("P", "xx/xx/ x", false, fourRotations));
pieces.add(new Piece("Z", " xx/ x /xx ", false, twoRotations));
pieces.add(new Piece("N", " xx/xxx ", false, fourRotations));
pieces.add(new Piece("Y", " x /xxxx", false, fourRotations));
pieces.add(new Piece("L", "x /xxxx", false, fourRotations));
}
/**
* Solve the 3x30 puzzle.
* @param args
*/
public static void main(String[] args) {
Pentomino model = new OneSidedPentomino(3, 30);
int solutions = model.solve();
System.out.println(solutions + " solutions found.");
}
}
| 2,921 | 40.15493 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.dancing;
import java.util.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* A generic solver for tile laying problems using Knuth's dancing link
* algorithm. It provides a very fast backtracking data structure for problems
 * that can be expressed as a sparse boolean matrix where the goal is to select a
* subset of the rows such that each column has exactly 1 true in it.
*
* The application gives each column a name and each row is named after the
* set of columns that it has as true. Solutions are passed back by giving the
* selected rows' names.
*
* The type parameter ColumnName is the class of application's column names.
*/
public class DancingLinks<ColumnName> {
private static final Log LOG =
LogFactory.getLog(DancingLinks.class.getName());
/**
* A cell in the table with up/down and left/right links that form doubly
* linked lists in both directions. It also includes a link to the column
* head.
*/
private static class Node<ColumnName> {
Node<ColumnName> left;
Node<ColumnName> right;
Node<ColumnName> up;
Node<ColumnName> down;
ColumnHeader<ColumnName> head;
Node(Node<ColumnName> l, Node<ColumnName> r, Node<ColumnName> u,
Node<ColumnName> d, ColumnHeader<ColumnName> h) {
left = l;
right = r;
up = u;
down = d;
head = h;
}
Node() {
this(null, null, null, null, null);
}
}
/**
* Column headers record the name of the column and the number of rows that
* satisfy this column. The names are provided by the application and can
* be anything. The size is used for the heuristic for picking the next
* column to explore.
*/
private static class ColumnHeader<ColumnName> extends Node<ColumnName> {
ColumnName name;
int size;
ColumnHeader(ColumnName n, int s) {
name = n;
size = s;
head = this;
}
ColumnHeader() {
this(null, 0);
}
}
/**
* The head of the table. Left/Right from the head are the unsatisfied
* ColumnHeader objects.
*/
private ColumnHeader<ColumnName> head;
/**
* The complete list of columns.
*/
private List<ColumnHeader<ColumnName>> columns;
public DancingLinks() {
head = new ColumnHeader<ColumnName>(null, 0);
head.left = head;
head.right = head;
head.up = head;
head.down = head;
columns = new ArrayList<ColumnHeader<ColumnName>>(200);
}
/**
* Add a column to the table
* @param name The name of the column, which will be returned as part of
* solutions
* @param primary Is the column required for a solution?
*/
public void addColumn(ColumnName name, boolean primary) {
ColumnHeader<ColumnName> top = new ColumnHeader<ColumnName>(name, 0);
top.up = top;
top.down = top;
if (primary) {
Node<ColumnName> tail = head.left;
tail.right = top;
top.left = tail;
top.right = head;
head.left = top;
} else {
top.left = top;
top.right = top;
}
columns.add(top);
}
/**
* Add a column to the table
* @param name The name of the column, which will be included in the solution
*/
public void addColumn(ColumnName name) {
addColumn(name, true);
}
/**
* Get the number of columns.
* @return the number of columns
*/
public int getNumberColumns() {
return columns.size();
}
/**
* Get the name of a given column as a string
* @param index the index of the column
* @return a string representation of the name
*/
public String getColumnName(int index) {
return columns.get(index).name.toString();
}
/**
* Add a row to the table.
* @param values the columns that are satisfied by this row
*/
public void addRow(boolean[] values) {
Node<ColumnName> prev = null;
for(int i=0; i < values.length; ++i) {
if (values[i]) {
ColumnHeader<ColumnName> top = columns.get(i);
top.size += 1;
Node<ColumnName> bottom = top.up;
Node<ColumnName> node = new Node<ColumnName>(null, null, bottom,
top, top);
bottom.down = node;
top.up = node;
if (prev != null) {
Node<ColumnName> front = prev.right;
node.left = prev;
node.right = front;
prev.right = node;
front.left = node;
} else {
node.left = node;
node.right = node;
}
prev = node;
}
}
}
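  /*
   * Minimal usage sketch with hypothetical String column names (not part of
   * the puzzle code): cover columns {A, B, C} given the candidate rows
   * {A,B}, {C} and {A,C}; the only exact cover is {A,B} together with {C}.
   *
   *   DancingLinks<String> links = new DancingLinks<String>();
   *   links.addColumn("A");
   *   links.addColumn("B");
   *   links.addColumn("C");
   *   links.addRow(new boolean[]{true,  true,  false});
   *   links.addRow(new boolean[]{false, false, true });
   *   links.addRow(new boolean[]{true,  false, true });
   *   int found = links.solve(acceptor);  // some SolutionAcceptor<String>;
   *                                       // found == 1
   */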
/**
* Applications should implement this to receive the solutions to their
* problems.
*/
public interface SolutionAcceptor<ColumnName> {
/**
* A callback to return a solution to the application.
* @param value a List of List of ColumnNames that were satisfied by each
* selected row
*/
void solution(List<List<ColumnName>> value);
}
/**
* Find the column with the fewest choices.
* @return The column header
*/
private ColumnHeader<ColumnName> findBestColumn() {
int lowSize = Integer.MAX_VALUE;
ColumnHeader<ColumnName> result = null;
ColumnHeader<ColumnName> current = (ColumnHeader<ColumnName>) head.right;
while (current != head) {
if (current.size < lowSize) {
lowSize = current.size;
result = current;
}
current = (ColumnHeader<ColumnName>) current.right;
}
return result;
}
/**
* Hide a column in the table
* @param col the column to hide
*/
private void coverColumn(ColumnHeader<ColumnName> col) {
LOG.debug("cover " + col.head.name);
// remove the column
col.right.left = col.left;
col.left.right = col.right;
Node<ColumnName> row = col.down;
while (row != col) {
Node<ColumnName> node = row.right;
while (node != row) {
node.down.up = node.up;
node.up.down = node.down;
node.head.size -= 1;
node = node.right;
}
row = row.down;
}
}
/**
* Uncover a column that was hidden.
* @param col the column to unhide
*/
private void uncoverColumn(ColumnHeader<ColumnName> col) {
LOG.debug("uncover " + col.head.name);
Node<ColumnName> row = col.up;
while (row != col) {
Node<ColumnName> node = row.left;
while (node != row) {
node.head.size += 1;
node.down.up = node;
node.up.down = node;
node = node.left;
}
row = row.up;
}
col.right.left = col;
col.left.right = col;
}
/**
* Get the name of a row by getting the list of column names that it
* satisfies.
* @param row the row to make a name for
* @return the list of column names
*/
private List<ColumnName> getRowName(Node<ColumnName> row) {
List<ColumnName> result = new ArrayList<ColumnName>();
result.add(row.head.name);
Node<ColumnName> node = row.right;
while (node != row) {
result.add(node.head.name);
node = node.right;
}
return result;
}
/**
* Find a solution to the problem.
   * @param partial a temporary data structure to keep the current partial
* answer in
* @param output the acceptor for the results that are found
* @return the number of solutions found
*/
private int search(List<Node<ColumnName>> partial, SolutionAcceptor<ColumnName> output) {
int results = 0;
if (head.right == head) {
List<List<ColumnName>> result = new ArrayList<List<ColumnName>>(partial.size());
for(Node<ColumnName> row: partial) {
result.add(getRowName(row));
}
output.solution(result);
results += 1;
} else {
ColumnHeader<ColumnName> col = findBestColumn();
if (col.size > 0) {
coverColumn(col);
Node<ColumnName> row = col.down;
while (row != col) {
partial.add(row);
Node<ColumnName> node = row.right;
while (node != row) {
coverColumn(node.head);
node = node.right;
}
results += search(partial, output);
partial.remove(partial.size() - 1);
node = row.left;
while (node != row) {
uncoverColumn(node.head);
node = node.left;
}
row = row.down;
}
uncoverColumn(col);
}
}
return results;
}
/**
* Generate a list of prefixes down to a given depth. Assumes that the
* problem is always deeper than depth.
* @param depth the depth to explore down
* @param choices an array of length depth to describe a prefix
   * @param prefixes a working data structure
*/
private void searchPrefixes(int depth, int[] choices,
List<int[]> prefixes) {
if (depth == 0) {
prefixes.add(choices.clone());
} else {
ColumnHeader<ColumnName> col = findBestColumn();
if (col.size > 0) {
coverColumn(col);
Node<ColumnName> row = col.down;
int rowId = 0;
while (row != col) {
Node<ColumnName> node = row.right;
while (node != row) {
coverColumn(node.head);
node = node.right;
}
choices[choices.length - depth] = rowId;
searchPrefixes(depth - 1, choices, prefixes);
node = row.left;
while (node != row) {
uncoverColumn(node.head);
node = node.left;
}
row = row.down;
rowId += 1;
}
uncoverColumn(col);
}
}
}
/**
* Generate a list of row choices to cover the first moves.
* @param depth the length of the prefixes to generate
* @return a list of integer arrays that list the rows to pick in order
*/
public List<int[]> split(int depth) {
int[] choices = new int[depth];
List<int[]> result = new ArrayList<int[]>(100000);
searchPrefixes(depth, choices, result);
return result;
}
/**
* Make one move from a prefix
   * @param goalRow the row that should be chosen
* @return the row that was found
*/
private Node<ColumnName> advance(int goalRow) {
ColumnHeader<ColumnName> col = findBestColumn();
if (col.size > 0) {
coverColumn(col);
Node<ColumnName> row = col.down;
int id = 0;
while (row != col) {
if (id == goalRow) {
Node<ColumnName> node = row.right;
while (node != row) {
coverColumn(node.head);
node = node.right;
}
return row;
}
id += 1;
row = row.down;
}
}
return null;
}
/**
* Undo a prefix exploration
   * @param row the row to roll back
*/
private void rollback(Node<ColumnName> row) {
Node<ColumnName> node = row.left;
while (node != row) {
uncoverColumn(node.head);
node = node.left;
}
uncoverColumn(row.head);
}
/**
* Given a prefix, find solutions under it.
* @param prefix a list of row choices that control which part of the search
* tree to explore
* @param output the output for each solution
* @return the number of solutions
*/
public int solve(int[] prefix, SolutionAcceptor<ColumnName> output) {
List<Node<ColumnName>> choices = new ArrayList<Node<ColumnName>>();
for(int i=0; i < prefix.length; ++i) {
choices.add(advance(prefix[i]));
}
int result = search(choices, output);
for(int i=prefix.length-1; i >=0; --i) {
rollback(choices.get(i));
}
return result;
}
/**
* Solve a complete problem
* @param output the acceptor to receive answers
* @return the number of solutions
*/
public int solve(SolutionAcceptor<ColumnName> output) {
return search(new ArrayList<Node<ColumnName>>(), output);
}
}
| 12,675 | 27.874715 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.terasort;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.zip.Checksum;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.PureJavaCrc32;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* Generate the official GraySort input data set.
* The user specifies the number of rows and the output directory and this
* class runs a map/reduce program to generate the data.
* The format of the data is:
* <ul>
* <li>(10 bytes key) (constant 2 bytes) (32 bytes rowid)
* (constant 4 bytes) (48 bytes filler) (constant 4 bytes)
* <li>The rowid is the right justified row id as a hex number.
* </ul>
*
* <p>
* To run the program:
* <b>bin/hadoop jar hadoop-*-examples.jar teragen 10000000000 in-dir</b>
*/
public class TeraGen extends Configured implements Tool {
private static final Log LOG = LogFactory.getLog(TeraSort.class);
public static enum Counters {CHECKSUM}
/**
* An input format that assigns ranges of longs to each mapper.
*/
static class RangeInputFormat
extends InputFormat<LongWritable, NullWritable> {
/**
     * An input split consisting of a range of numbers.
*/
static class RangeInputSplit extends InputSplit implements Writable {
long firstRow;
long rowCount;
public RangeInputSplit() { }
public RangeInputSplit(long offset, long length) {
firstRow = offset;
rowCount = length;
}
public long getLength() throws IOException {
return 0;
}
public String[] getLocations() throws IOException {
return new String[]{};
}
public void readFields(DataInput in) throws IOException {
firstRow = WritableUtils.readVLong(in);
rowCount = WritableUtils.readVLong(in);
}
public void write(DataOutput out) throws IOException {
WritableUtils.writeVLong(out, firstRow);
WritableUtils.writeVLong(out, rowCount);
}
}
/**
* A record reader that will generate a range of numbers.
*/
static class RangeRecordReader
extends RecordReader<LongWritable, NullWritable> {
long startRow;
long finishedRows;
long totalRows;
LongWritable key = null;
public RangeRecordReader() {
}
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
startRow = ((RangeInputSplit)split).firstRow;
finishedRows = 0;
totalRows = ((RangeInputSplit)split).rowCount;
}
public void close() throws IOException {
// NOTHING
}
public LongWritable getCurrentKey() {
return key;
}
public NullWritable getCurrentValue() {
return NullWritable.get();
}
public float getProgress() throws IOException {
return finishedRows / (float) totalRows;
}
public boolean nextKeyValue() {
if (key == null) {
key = new LongWritable();
}
if (finishedRows < totalRows) {
key.set(startRow + finishedRows);
finishedRows += 1;
return true;
} else {
return false;
}
}
}
public RecordReader<LongWritable, NullWritable>
createRecordReader(InputSplit split, TaskAttemptContext context)
throws IOException {
return new RangeRecordReader();
}
/**
* Create the desired number of splits, dividing the number of rows
* between the mappers.
*/
public List<InputSplit> getSplits(JobContext job) {
long totalRows = getNumberOfRows(job);
int numSplits = job.getConfiguration().getInt(MRJobConfig.NUM_MAPS, 1);
LOG.info("Generating " + totalRows + " using " + numSplits);
List<InputSplit> splits = new ArrayList<InputSplit>();
long currentRow = 0;
for(int split = 0; split < numSplits; ++split) {
long goal =
(long) Math.ceil(totalRows * (double)(split + 1) / numSplits);
splits.add(new RangeInputSplit(currentRow, goal - currentRow));
currentRow = goal;
}
return splits;
}
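    /*
     * For example, totalRows = 10 and numSplits = 3 gives goals of
     * ceil(10*1/3) = 4, ceil(10*2/3) = 7 and ceil(10*3/3) = 10, i.e. splits
     * of 4, 3 and 3 rows: the counts always sum to totalRows and differ by
     * at most one.
     */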
}
static long getNumberOfRows(JobContext job) {
return job.getConfiguration().getLong(TeraSortConfigKeys.NUM_ROWS.key(),
TeraSortConfigKeys.DEFAULT_NUM_ROWS);
}
static void setNumberOfRows(Job job, long numRows) {
job.getConfiguration().setLong(TeraSortConfigKeys.NUM_ROWS.key(), numRows);
}
/**
   * The Mapper class that, given a row number, will generate the appropriate
* output line.
*/
public static class SortGenMapper
extends Mapper<LongWritable, NullWritable, Text, Text> {
private Text key = new Text();
private Text value = new Text();
private Unsigned16 rand = null;
private Unsigned16 rowId = null;
private Unsigned16 checksum = new Unsigned16();
private Checksum crc32 = new PureJavaCrc32();
private Unsigned16 total = new Unsigned16();
private static final Unsigned16 ONE = new Unsigned16(1);
private byte[] buffer = new byte[TeraInputFormat.KEY_LENGTH +
TeraInputFormat.VALUE_LENGTH];
private Counter checksumCounter;
public void map(LongWritable row, NullWritable ignored,
Context context) throws IOException, InterruptedException {
if (rand == null) {
rowId = new Unsigned16(row.get());
rand = Random16.skipAhead(rowId);
checksumCounter = context.getCounter(Counters.CHECKSUM);
}
Random16.nextRand(rand);
GenSort.generateRecord(buffer, rand, rowId);
key.set(buffer, 0, TeraInputFormat.KEY_LENGTH);
value.set(buffer, TeraInputFormat.KEY_LENGTH,
TeraInputFormat.VALUE_LENGTH);
context.write(key, value);
crc32.reset();
crc32.update(buffer, 0,
TeraInputFormat.KEY_LENGTH + TeraInputFormat.VALUE_LENGTH);
checksum.set(crc32.getValue());
total.add(checksum);
rowId.add(ONE);
}
@Override
public void cleanup(Context context) {
if (checksumCounter != null) {
checksumCounter.increment(total.getLow8());
}
}
}
private static void usage() throws IOException {
System.err.println("teragen <num rows> <output dir>");
}
/**
* Parse a number that optionally has a postfix that denotes a base.
   * @param str an integer string with an optional base suffix {k,m,b,t}.
* @return the expanded value
*/
private static long parseHumanLong(String str) {
char tail = str.charAt(str.length() - 1);
long base = 1;
switch (tail) {
case 't':
      // use a long literal so this multiplication does not overflow int
      base *= 1000L * 1000 * 1000 * 1000;
break;
case 'b':
base *= 1000 * 1000 * 1000;
break;
case 'm':
base *= 1000 * 1000;
break;
case 'k':
base *= 1000;
break;
default:
}
if (base != 1) {
str = str.substring(0, str.length() - 1);
}
return Long.parseLong(str) * base;
}
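  /*
   * Examples: "5000" -> 5000, "500k" -> 500000, "100m" -> 100000000,
   * "10b" -> 10000000000 and "1t" -> 1000000000000 (the suffixes are
   * decimal, not binary).
   */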
/**
* @param args the cli arguments
*/
public int run(String[] args)
throws IOException, InterruptedException, ClassNotFoundException {
Job job = Job.getInstance(getConf());
if (args.length != 2) {
usage();
return 2;
}
setNumberOfRows(job, parseHumanLong(args[0]));
Path outputDir = new Path(args[1]);
FileOutputFormat.setOutputPath(job, outputDir);
job.setJobName("TeraGen");
job.setJarByClass(TeraGen.class);
job.setMapperClass(SortGenMapper.class);
job.setNumReduceTasks(0);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setInputFormatClass(RangeInputFormat.class);
job.setOutputFormatClass(TeraOutputFormat.class);
return job.waitForCompletion(true) ? 0 : 1;
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new TeraGen(), args);
System.exit(res);
}
}
| 9,799 | 30.71521 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/GenSort.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.terasort;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintStream;
import java.math.BigInteger;
import java.util.zip.Checksum;
import org.apache.hadoop.util.PureJavaCrc32;
/**
* A single process data generator for the terasort data. Based on gensort.c
* version 1.1 (3 Mar 2009) from Chris Nyberg <[email protected]>.
*/
public class GenSort {
/**
* Generate a "binary" record suitable for all sort benchmarks *except*
* PennySort.
*/
static void generateRecord(byte[] recBuf, Unsigned16 rand,
Unsigned16 recordNumber) {
/* generate the 10-byte key using the high 10 bytes of the 128-bit
* random number
*/
for(int i=0; i < 10; ++i) {
recBuf[i] = rand.getByte(i);
}
/* add 2 bytes of "break" */
recBuf[10] = 0x00;
recBuf[11] = 0x11;
/* convert the 128-bit record number to 32 bits of ascii hexadecimal
* as the next 32 bytes of the record.
*/
for (int i = 0; i < 32; i++) {
recBuf[12 + i] = (byte) recordNumber.getHexDigit(i);
}
/* add 4 bytes of "break" data */
recBuf[44] = (byte) 0x88;
recBuf[45] = (byte) 0x99;
recBuf[46] = (byte) 0xAA;
recBuf[47] = (byte) 0xBB;
/* add 48 bytes of filler based on low 48 bits of random number */
for(int i=0; i < 12; ++i) {
recBuf[48+i*4] = recBuf[49+i*4] = recBuf[50+i*4] = recBuf[51+i*4] =
(byte) rand.getHexDigit(20 + i);
}
/* add 4 bytes of "break" data */
recBuf[96] = (byte) 0xCC;
recBuf[97] = (byte) 0xDD;
recBuf[98] = (byte) 0xEE;
recBuf[99] = (byte) 0xFF;
}
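// Layout summary (derived from the code above): bytes 0-9 hold the key,
// 10-11 a fixed break, 12-43 the record number as 32 hex digits, 44-47
// another break, 48-95 filler from the low bits of the random number, and
// 96-99 a final break, for a 100-byte record in total.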
private static BigInteger makeBigInteger(long x) {
byte[] data = new byte[8];
for(int i=0; i < 8; ++i) {
data[i] = (byte) (x >>> (56 - 8*i));
}
return new BigInteger(1, data);
}
private static final BigInteger NINETY_FIVE = new BigInteger("95");
/**
* Generate an ascii record suitable for all sort benchmarks including
* PennySort.
*/
static void generateAsciiRecord(byte[] recBuf, Unsigned16 rand,
Unsigned16 recordNumber) {
/* generate the 10-byte ascii key using mostly the high 64 bits.
*/
long temp = rand.getHigh8();
if (temp < 0) {
// use biginteger to avoid the negative sign problem
BigInteger bigTemp = makeBigInteger(temp);
recBuf[0] = (byte) (' ' + (bigTemp.mod(NINETY_FIVE).longValue()));
temp = bigTemp.divide(NINETY_FIVE).longValue();
} else {
recBuf[0] = (byte) (' ' + (temp % 95));
temp /= 95;
}
for(int i=1; i < 8; ++i) {
recBuf[i] = (byte) (' ' + (temp % 95));
temp /= 95;
}
temp = rand.getLow8();
if (temp < 0) {
BigInteger bigTemp = makeBigInteger(temp);
recBuf[8] = (byte) (' ' + (bigTemp.mod(NINETY_FIVE).longValue()));
temp = bigTemp.divide(NINETY_FIVE).longValue();
} else {
recBuf[8] = (byte) (' ' + (temp % 95));
temp /= 95;
}
recBuf[9] = (byte)(' ' + (temp % 95));
/* add 2 bytes of "break" */
recBuf[10] = ' ';
recBuf[11] = ' ';
/* convert the 128-bit record number to 32 bits of ascii hexadecimal
* as the next 32 bytes of the record.
*/
for (int i = 0; i < 32; i++) {
recBuf[12 + i] = (byte) recordNumber.getHexDigit(i);
}
/* add 2 bytes of "break" data */
recBuf[44] = ' ';
recBuf[45] = ' ';
/* add 52 bytes of filler based on low 48 bits of random number */
for(int i=0; i < 13; ++i) {
recBuf[46+i*4] = recBuf[47+i*4] = recBuf[48+i*4] = recBuf[49+i*4] =
(byte) rand.getHexDigit(19 + i);
}
/* add 2 bytes of "break" data */
recBuf[98] = '\r'; /* nice for Windows */
recBuf[99] = '\n';
}
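// Layout summary (derived from the code above): bytes 0-9 hold a printable
// ASCII key, 10-11 spaces, 12-43 the record number as 32 hex digits, 44-45
// spaces, 46-97 filler, and 98-99 a trailing "\r\n", again 100 bytes total.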
private static void usage() {
PrintStream out = System.out;
out.println("usage: gensort [-a] [-c] [-bSTARTING_REC_NUM] NUM_RECS FILE_NAME");
out.println("-a Generate ascii records required for PennySort or JouleSort.");
out.println(" These records are also an alternative input for the other");
out.println(" sort benchmarks. Without this flag, binary records will be");
out.println(" generated that contain the highest density of randomness in");
out.println(" the 10-byte key.");
out.println( "-c Calculate the sum of the crc32 checksums of each of the");
out.println(" generated records and send it to standard error.");
out.println("-bN Set the beginning record generated to N. By default the");
out.println(" first record generated is record 0.");
out.println("NUM_RECS The number of sequential records to generate.");
out.println("FILE_NAME The name of the file to write the records to.\n");
out.println("Example 1 - to generate 1000000 ascii records starting at record 0 to");
out.println("the file named \"pennyinput\":");
out.println(" gensort -a 1000000 pennyinput\n");
out.println("Example 2 - to generate 1000 binary records beginning with record 2000");
out.println("to the file named \"partition2\":");
out.println(" gensort -b2000 1000 partition2");
System.exit(1);
}
public static void outputRecords(OutputStream out,
boolean useAscii,
Unsigned16 firstRecordNumber,
Unsigned16 recordsToGenerate,
Unsigned16 checksum
) throws IOException {
byte[] row = new byte[100];
Unsigned16 recordNumber = new Unsigned16(firstRecordNumber);
Unsigned16 lastRecordNumber = new Unsigned16(firstRecordNumber);
Checksum crc = new PureJavaCrc32();
Unsigned16 tmp = new Unsigned16();
lastRecordNumber.add(recordsToGenerate);
Unsigned16 ONE = new Unsigned16(1);
Unsigned16 rand = Random16.skipAhead(firstRecordNumber);
while (!recordNumber.equals(lastRecordNumber)) {
Random16.nextRand(rand);
if (useAscii) {
generateAsciiRecord(row, rand, recordNumber);
} else {
generateRecord(row, rand, recordNumber);
}
if (checksum != null) {
crc.reset();
crc.update(row, 0, row.length);
tmp.set(crc.getValue());
checksum.add(tmp);
}
recordNumber.add(ONE);
out.write(row);
}
}
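// Usage sketch (hypothetical file name): write 1000 binary records starting
// at record 0 and print the accumulated CRC32 checksum:
//   Unsigned16 sum = new Unsigned16();
//   outputRecords(new FileOutputStream("part0"), false,
//                 new Unsigned16(), new Unsigned16(1000), sum);
//   System.out.println(sum);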
public static void main(String[] args) throws Exception {
Unsigned16 startingRecord = new Unsigned16();
Unsigned16 numberOfRecords;
OutputStream out;
boolean useAscii = false;
Unsigned16 checksum = null;
int i;
for(i=0; i < args.length; ++i) {
String arg = args[i];
int argLength = arg.length();
if (argLength >= 1 && arg.charAt(0) == '-') {
if (argLength < 2) {
usage();
}
switch (arg.charAt(1)) {
case 'a':
useAscii = true;
break;
case 'b':
startingRecord = Unsigned16.fromDecimal(arg.substring(2));
break;
case 'c':
checksum = new Unsigned16();
break;
default:
usage();
}
} else {
break;
}
}
if (args.length - i != 2) {
usage();
}
numberOfRecords = Unsigned16.fromDecimal(args[i]);
out = new FileOutputStream(args[i+1]);
outputRecords(out, useAscii, startingRecord, numberOfRecords, checksum);
out.close();
if (checksum != null) {
System.out.println(checksum);
}
}
}
| 8,485 | 32.674603 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.terasort;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileAlreadyExistsException;
import org.apache.hadoop.mapred.InvalidJobConfException;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.security.TokenCache;
/**
* An output format that writes the key and value appended together.
*/
public class TeraOutputFormat extends FileOutputFormat<Text,Text> {
private OutputCommitter committer = null;
/**
* Set the requirement for a final sync before the stream is closed.
*/
static void setFinalSync(JobContext job, boolean newValue) {
job.getConfiguration().setBoolean(
TeraSortConfigKeys.FINAL_SYNC_ATTRIBUTE.key(), newValue);
}
/**
* Does the user want a final sync at close?
*/
public static boolean getFinalSync(JobContext job) {
return job.getConfiguration().getBoolean(
TeraSortConfigKeys.FINAL_SYNC_ATTRIBUTE.key(),
TeraSortConfigKeys.DEFAULT_FINAL_SYNC_ATTRIBUTE);
}
static class TeraRecordWriter extends RecordWriter<Text,Text> {
private boolean finalSync = false;
private FSDataOutputStream out;
public TeraRecordWriter(FSDataOutputStream out,
JobContext job) {
finalSync = getFinalSync(job);
this.out = out;
}
public synchronized void write(Text key,
Text value) throws IOException {
out.write(key.getBytes(), 0, key.getLength());
out.write(value.getBytes(), 0, value.getLength());
}
public void close(TaskAttemptContext context) throws IOException {
if (finalSync) {
out.sync();
}
out.close();
}
}
@Override
public void checkOutputSpecs(JobContext job
) throws InvalidJobConfException, IOException {
// Ensure that the output directory is set
Path outDir = getOutputPath(job);
if (outDir == null) {
throw new InvalidJobConfException("Output directory not set in JobConf.");
}
final Configuration jobConf = job.getConfiguration();
// get delegation token for outDir's file system
TokenCache.obtainTokensForNamenodes(job.getCredentials(),
new Path[] { outDir }, jobConf);
final FileSystem fs = outDir.getFileSystem(jobConf);
if (fs.exists(outDir)) {
// existing output dir is considered empty iff its only content is the
// partition file.
//
final FileStatus[] outDirKids = fs.listStatus(outDir);
boolean empty = false;
if (outDirKids != null && outDirKids.length == 1) {
final FileStatus st = outDirKids[0];
final String fname = st.getPath().getName();
empty =
!st.isDirectory() && TeraInputFormat.PARTITION_FILENAME.equals(fname);
}
if (TeraSort.getUseSimplePartitioner(job) || !empty) {
throw new FileAlreadyExistsException("Output directory " + outDir
+ " already exists");
}
}
}
public RecordWriter<Text,Text> getRecordWriter(TaskAttemptContext job
) throws IOException {
Path file = getDefaultWorkFile(job, "");
FileSystem fs = file.getFileSystem(job.getConfiguration());
FSDataOutputStream fileOut = fs.create(file);
return new TeraRecordWriter(fileOut, job);
}
public OutputCommitter getOutputCommitter(TaskAttemptContext context)
throws IOException {
if (committer == null) {
Path output = getOutputPath(context);
committer = new FileOutputCommitter(output, context);
}
return committer;
}
}
| 4,963 | 34.457143 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraValidate.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.terasort;
import java.io.IOException;
import java.util.zip.Checksum;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.PureJavaCrc32;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* Generate 1 mapper per file that checks to make sure the keys
* are sorted within each file. The mapper also generates
* "$file:begin", first key and "$file:end", last key. The reduce verifies that
* all of the start/end items are in order.
* Any output from the reduce is a problem report.
* <p>
* To run the program:
* <b>bin/hadoop jar hadoop-*-examples.jar teravalidate out-dir report-dir</b>
* <p>
* If there is any output, something is wrong and the output of the reduce
* will have the problem report.
*/
public class TeraValidate extends Configured implements Tool {
private static final Text ERROR = new Text("error");
private static final Text CHECKSUM = new Text("checksum");
private static String textifyBytes(Text t) {
BytesWritable b = new BytesWritable();
b.set(t.getBytes(), 0, t.getLength());
return b.toString();
}
static class ValidateMapper extends Mapper<Text,Text,Text,Text> {
private Text lastKey;
private String filename;
private Unsigned16 checksum = new Unsigned16();
private Unsigned16 tmp = new Unsigned16();
private Checksum crc32 = new PureJavaCrc32();
/**
* Get the final part of the input name
* @param split the input split
* @return the "part-r-00000" for the input
*/
private String getFilename(FileSplit split) {
return split.getPath().getName();
}
public void map(Text key, Text value, Context context)
throws IOException, InterruptedException {
if (lastKey == null) {
FileSplit fs = (FileSplit) context.getInputSplit();
filename = getFilename(fs);
context.write(new Text(filename + ":begin"), key);
lastKey = new Text();
} else {
if (key.compareTo(lastKey) < 0) {
context.write(ERROR, new Text("misorder in " + filename +
" between " + textifyBytes(lastKey) +
" and " + textifyBytes(key)));
}
}
// compute the crc of the key and value and add it to the sum
crc32.reset();
crc32.update(key.getBytes(), 0, key.getLength());
crc32.update(value.getBytes(), 0, value.getLength());
tmp.set(crc32.getValue());
checksum.add(tmp);
lastKey.set(key);
}
public void cleanup(Context context)
throws IOException, InterruptedException {
if (lastKey != null) {
context.write(new Text(filename + ":end"), lastKey);
context.write(CHECKSUM, new Text(checksum.toString()));
}
}
}
/**
* Check the boundaries between the output files by making sure that the
* boundary keys are always increasing.
* Also passes any error reports along intact.
*/
static class ValidateReducer extends Reducer<Text,Text,Text,Text> {
private boolean firstKey = true;
private Text lastKey = new Text();
private Text lastValue = new Text();
public void reduce(Text key, Iterable<Text> values,
Context context) throws IOException, InterruptedException {
if (ERROR.equals(key)) {
for (Text val : values) {
context.write(key, val);
}
} else if (CHECKSUM.equals(key)) {
Unsigned16 tmp = new Unsigned16();
Unsigned16 sum = new Unsigned16();
for (Text val : values) {
tmp.set(val.toString());
sum.add(tmp);
}
context.write(CHECKSUM, new Text(sum.toString()));
} else {
Text value = values.iterator().next();
if (firstKey) {
firstKey = false;
} else {
if (value.compareTo(lastValue) < 0) {
context.write(ERROR,
new Text("bad key partitioning:\n file " +
lastKey + " key " +
textifyBytes(lastValue) +
"\n file " + key + " key " +
textifyBytes(value)));
}
}
lastKey.set(key);
lastValue.set(value);
}
}
}
private static void usage() throws IOException {
System.err.println("teravalidate <out-dir> <report-dir>");
}
public int run(String[] args) throws Exception {
Job job = Job.getInstance(getConf());
if (args.length != 2) {
usage();
return 1;
}
TeraInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
job.setJobName("TeraValidate");
job.setJarByClass(TeraValidate.class);
job.setMapperClass(ValidateMapper.class);
job.setReducerClass(ValidateReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
// force a single reducer
job.setNumReduceTasks(1);
// force a single split
FileInputFormat.setMinInputSplitSize(job, Long.MAX_VALUE);
job.setInputFormatClass(TeraInputFormat.class);
return job.waitForCompletion(true) ? 0 : 1;
}
/**
* @param args
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new TeraValidate(), args);
System.exit(res);
}
}
| 6,787 | 34.726316 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSortConfigKeys.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.terasort;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/**
* <p>
* TeraSort configurations.
* </p>
*/
@Private
@Unstable
public enum TeraSortConfigKeys {
NUM_ROWS("mapreduce.terasort.num-rows",
"Number of rows to generate during teragen."),
NUM_PARTITIONS("mapreduce.terasort.num.partitions",
"Number of partitions used for sampling."),
SAMPLE_SIZE("mapreduce.terasort.partitions.sample",
"Sample size for each partition."),
FINAL_SYNC_ATTRIBUTE("mapreduce.terasort.final.sync",
"Perform a disk-persisting hsync at end of every file-write."),
USE_TERA_SCHEDULER("mapreduce.terasort.use.terascheduler",
"Use TeraScheduler for computing input split distribution."),
USE_SIMPLE_PARTITIONER("mapreduce.terasort.simplepartitioner",
"Use SimplePartitioner instead of TotalOrderPartitioner."),
OUTPUT_REPLICATION("mapreduce.terasort.output.replication",
"Replication factor to use for output data files.");
private String confName;
private String description;
TeraSortConfigKeys(String configName, String description) {
this.confName = configName;
this.description = description;
}
public String key() {
return this.confName;
}
public String toString() {
return "<" + confName + "> " + description;
}
public static final long DEFAULT_NUM_ROWS = 0L;
public static final int DEFAULT_NUM_PARTITIONS = 10;
public static final long DEFAULT_SAMPLE_SIZE = 100000L;
public static final boolean DEFAULT_FINAL_SYNC_ATTRIBUTE = true;
public static final boolean DEFAULT_USE_TERA_SCHEDULER = true;
public static final boolean DEFAULT_USE_SIMPLE_PARTITIONER = false;
public static final int DEFAULT_OUTPUT_REPLICATION = 1;
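  // Usage sketch: because the terasort tools run through ToolRunner, these
  // keys can typically be overridden on the command line via -D, e.g.
  //   hadoop jar hadoop-mapreduce-examples.jar terasort \
  //     -Dmapreduce.terasort.final.sync=false in-dir out-dir
  // (jar and directory names here are placeholders).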
}
| 2,663 | 33.153846 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/Random16.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.terasort;
/**
* This class implements a 128-bit linear congruential generator.
* Specifically, if X0 is the most recently issued 128-bit random
* number (or a seed of 0 if no random number has yet been generated),
* the next number to be generated, X1, is equal to:
* X1 = (a * X0 + c) mod 2**128
* where a is 47026247687942121848144207491837523525
* or 0x2360ed051fc65da44385df649fccf645
* and c is 98910279301475397889117759788405497857
* or 0x4a696d47726179524950202020202001
* The coefficient "a" is suggested by:
* Pierre L'Ecuyer, "Tables of linear congruential generators of different
* sizes and good lattice structure", Mathematics of Computation, 68
* pp. 249 - 260 (1999)
* http://www.ams.org/mcom/1999-68-225/S0025-5718-99-00996-5/S0025-5718-99-00996-5.pdf
* The constant "c" meets the simple suggestion by the same reference that
* it be odd.
*
* There is also a facility for quickly advancing the state of the
* generator by a fixed number of steps - this facilitates parallel
* generation.
*
* This is based on 1.0 of rand16.c from Chris Nyberg
* <[email protected]>.
*/
class Random16 {
/**
* The "Gen" array contain powers of 2 of the linear congruential generator.
* The index 0 struct contain the "a" coefficient and "c" constant for the
* generator. That is, the generator is:
* f(x) = (Gen[0].a * x + Gen[0].c) mod 2**128
*
* All structs after the first contain an "a" and "c" that
* comprise the square of the previous function.
*
* f**2(x) = (Gen[1].a * x + Gen[1].c) mod 2**128
* f**4(x) = (Gen[2].a * x + Gen[2].c) mod 2**128
* f**8(x) = (Gen[3].a * x + Gen[3].c) mod 2**128
* ...
*/
private static class RandomConstant {
final Unsigned16 a;
final Unsigned16 c;
public RandomConstant(String left, String right) {
a = new Unsigned16(left);
c = new Unsigned16(right);
}
}
private static final RandomConstant[] genArray = new RandomConstant[]{
/* [ 0] */ new RandomConstant("2360ed051fc65da44385df649fccf645",
"4a696d47726179524950202020202001"),
/* [ 1] */ new RandomConstant("17bce35bdf69743c529ed9eb20e0ae99",
"95e0e48262b3edfe04479485c755b646"),
/* [ 2] */ new RandomConstant("f4dd417327db7a9bd194dfbe42d45771",
"882a02c315362b60765f100068b33a1c"),
/* [ 3] */ new RandomConstant("6347af777a7898f6d1a2d6f33505ffe1",
"5efc4abfaca23e8ca8edb1f2dfbf6478"),
/* [ 4] */ new RandomConstant("b6a4239f3b315f84f6ef6d3d288c03c1",
"f25bd15439d16af594c1b1bafa6239f0"),
/* [ 5] */ new RandomConstant("2c82901ad1cb0cd182b631ba6b261781",
"89ca67c29c9397d59c612596145db7e0"),
/* [ 6] */ new RandomConstant("dab03f988288676ee49e66c4d2746f01",
"8b6ae036713bd578a8093c8eae5c7fc0"),
/* [ 7] */ new RandomConstant("602167331d86cf5684fe009a6d09de01",
"98a2542fd23d0dbdff3b886cdb1d3f80"),
/* [ 8] */ new RandomConstant("61ecb5c24d95b058f04c80a23697bc01",
"954db923fdb7933e947cd1edcecb7f00"),
/* [ 9] */ new RandomConstant("4a5c31e0654c28aa60474e83bf3f7801",
"00be4a36657c98cd204e8c8af7dafe00"),
/* [ 10] */ new RandomConstant("ae4f079d54fbece1478331d3c6bef001",
"991965329dccb28d581199ab18c5fc00"),
/* [ 11] */ new RandomConstant("101b8cb830c7cb927ff1ed50ae7de001",
"e1a8705b63ad5b8cd6c3d268d5cbf800"),
/* [ 12] */ new RandomConstant("f54a27fc056b00e7563f3505e0fbc001",
"2b657bbfd6ed9d632079e70c3c97f000"),
/* [ 13] */ new RandomConstant("df8a6fc1a833d201f98d719dd1f78001",
"59b60ee4c52fa49e9fe90682bd2fe000"),
/* [ 14] */ new RandomConstant("5480a5015f101a4ea7e3f183e3ef0001",
"cc099c88030679464fe86aae8a5fc000"),
/* [ 15] */ new RandomConstant("a498509e76e5d7925f539c28c7de0001",
"06b9abff9f9f33dd30362c0154bf8000"),
/* [ 16] */ new RandomConstant("0798a3d8b10dc72e60121cd58fbc0001",
"e296707121688d5a0260b293a97f0000"),
/* [ 17] */ new RandomConstant("1647d1e78ec02e665fafcbbb1f780001",
"189ffc4701ff23cb8f8acf6b52fe0000"),
/* [ 18] */ new RandomConstant("a7c982285e72bf8c0c8ddfb63ef00001",
"5141110ab208fb9d61fb47e6a5fc0000"),
/* [ 19] */ new RandomConstant("3eb78ee8fb8c56dbc5d4e06c7de00001",
"3c97caa62540f2948d8d340d4bf80000"),
/* [ 20] */ new RandomConstant("72d03b6f4681f2f9fe8e44d8fbc00001",
"1b25cb9cfe5a0c963174f91a97f00000"),
/* [ 21] */ new RandomConstant("ea85f81e4f502c9bc8ae99b1f7800001",
"0c644570b4a487103c5436352fe00000"),
/* [ 22] */ new RandomConstant("629c320db08b00c6bfa57363ef000001",
"3d0589c28869472bde517c6a5fc00000"),
/* [ 23] */ new RandomConstant("c5c4b9ce268d074a386be6c7de000001",
"bc95e5ab36477e65534738d4bf800000"),
/* [ 24] */ new RandomConstant("f30bbbbed1596187555bcd8fbc000001",
"ddb02ff72a031c01011f71a97f000000"),
/* [ 25] */ new RandomConstant("4a1000fb26c9eeda3cc79b1f78000001",
"2561426086d9acdb6c82e352fe000000"),
/* [ 26] */ new RandomConstant("89fb5307f6bf8ce2c1cf363ef0000001",
"64a788e3c118ed1c8215c6a5fc000000"),
/* [ 27] */ new RandomConstant("830b7b3358a5d67ea49e6c7de0000001",
"e65ea321908627cfa86b8d4bf8000000"),
/* [ 28] */ new RandomConstant("fd8a51da91a69fe1cd3cd8fbc0000001",
"53d27225604d85f9e1d71a97f0000000"),
/* [ 29] */ new RandomConstant("901a48b642b90b55aa79b1f780000001",
"ca5ec7a3ed1fe55e07ae352fe0000000"),
/* [ 30] */ new RandomConstant("118cdefdf32144f394f363ef00000001",
"4daebb2e085330651f5c6a5fc0000000"),
/* [ 31] */ new RandomConstant("0a88c0a91cff430829e6c7de00000001",
"9d6f1a00a8f3f76e7eb8d4bf80000000"),
/* [ 32] */ new RandomConstant("433bef4314f16a9453cd8fbc00000001",
"158c62f2b31e496dfd71a97f00000000"),
/* [ 33] */ new RandomConstant("c294b02995ae6738a79b1f7800000001",
"290e84a2eb15fd1ffae352fe00000000"),
/* [ 34] */ new RandomConstant("913575e0da8b16b14f363ef000000001",
"e3dc1bfbe991a34ff5c6a5fc00000000"),
/* [ 35] */ new RandomConstant("2f61b9f871cf4e629e6c7de000000001",
"ddf540d020b9eadfeb8d4bf800000000"),
/* [ 36] */ new RandomConstant("78d26ccbd68320c53cd8fbc000000001",
"8ee4950177ce66bfd71a97f000000000"),
/* [ 37] */ new RandomConstant("8b7ebd037898518a79b1f78000000001",
"39e0f787c907117fae352fe000000000"),
/* [ 38] */ new RandomConstant("0b5507b61f78e314f363ef0000000001",
"659d2522f7b732ff5c6a5fc000000000"),
/* [ 39] */ new RandomConstant("4f884628f812c629e6c7de0000000001",
"9e8722938612a5feb8d4bf8000000000"),
/* [ 40] */ new RandomConstant("be896744d4a98c53cd8fbc0000000001",
"e941a65d66b64bfd71a97f0000000000"),
/* [ 41] */ new RandomConstant("daf63a553b6318a79b1f780000000001",
"7b50d19437b097fae352fe0000000000"),
/* [ 42] */ new RandomConstant("2d7a23d8bf06314f363ef00000000001",
"59d7b68e18712ff5c6a5fc0000000000"),
/* [ 43] */ new RandomConstant("392b046a9f0c629e6c7de00000000001",
"4087bab2d5225feb8d4bf80000000000"),
/* [ 44] */ new RandomConstant("eb30fbb9c218c53cd8fbc00000000001",
"b470abc03b44bfd71a97f00000000000"),
/* [ 45] */ new RandomConstant("b9cdc30594318a79b1f7800000000001",
"366630eaba897fae352fe00000000000"),
/* [ 46] */ new RandomConstant("014ab453686314f363ef000000000001",
"a2dfc77e8512ff5c6a5fc00000000000"),
/* [ 47] */ new RandomConstant("395221c7d0c629e6c7de000000000001",
"1e0d25a14a25feb8d4bf800000000000"),
/* [ 48] */ new RandomConstant("4d972813a18c53cd8fbc000000000001",
"9d50a5d3944bfd71a97f000000000000"),
/* [ 49] */ new RandomConstant("06f9e2374318a79b1f78000000000001",
"bf7ab5eb2897fae352fe000000000000"),
/* [ 50] */ new RandomConstant("bd220cae86314f363ef0000000000001",
"925b14e6512ff5c6a5fc000000000000"),
/* [ 51] */ new RandomConstant("36fd3a5d0c629e6c7de0000000000001",
"724cce0ca25feb8d4bf8000000000000"),
/* [ 52] */ new RandomConstant("60def8ba18c53cd8fbc0000000000001",
"1af42d1944bfd71a97f0000000000000"),
/* [ 53] */ new RandomConstant("8d500174318a79b1f780000000000001",
"0f529e32897fae352fe0000000000000"),
/* [ 54] */ new RandomConstant("48e842e86314f363ef00000000000001",
"844e4c6512ff5c6a5fc0000000000000"),
/* [ 55] */ new RandomConstant("4af185d0c629e6c7de00000000000001",
"9f40d8ca25feb8d4bf80000000000000"),
/* [ 56] */ new RandomConstant("7a670ba18c53cd8fbc00000000000001",
"9912b1944bfd71a97f00000000000000"),
/* [ 57] */ new RandomConstant("86de174318a79b1f7800000000000001",
"9c69632897fae352fe00000000000000"),
/* [ 58] */ new RandomConstant("55fc2e86314f363ef000000000000001",
"e1e2c6512ff5c6a5fc00000000000000"),
/* [ 59] */ new RandomConstant("ccf85d0c629e6c7de000000000000001",
"68058ca25feb8d4bf800000000000000"),
/* [ 60] */ new RandomConstant("1df0ba18c53cd8fbc000000000000001",
"610b1944bfd71a97f000000000000000"),
/* [ 61] */ new RandomConstant("4be174318a79b1f78000000000000001",
"061632897fae352fe000000000000000"),
/* [ 62] */ new RandomConstant("d7c2e86314f363ef0000000000000001",
"1c2c6512ff5c6a5fc000000000000000"),
/* [ 63] */ new RandomConstant("af85d0c629e6c7de0000000000000001",
"7858ca25feb8d4bf8000000000000000"),
/* [ 64] */ new RandomConstant("5f0ba18c53cd8fbc0000000000000001",
"f0b1944bfd71a97f0000000000000000"),
/* [ 65] */ new RandomConstant("be174318a79b1f780000000000000001",
"e1632897fae352fe0000000000000000"),
/* [ 66] */ new RandomConstant("7c2e86314f363ef00000000000000001",
"c2c6512ff5c6a5fc0000000000000000"),
/* [ 67] */ new RandomConstant("f85d0c629e6c7de00000000000000001",
"858ca25feb8d4bf80000000000000000"),
/* [ 68] */ new RandomConstant("f0ba18c53cd8fbc00000000000000001",
"0b1944bfd71a97f00000000000000000"),
/* [ 69] */ new RandomConstant("e174318a79b1f7800000000000000001",
"1632897fae352fe00000000000000000"),
/* [ 70] */ new RandomConstant("c2e86314f363ef000000000000000001",
"2c6512ff5c6a5fc00000000000000000"),
/* [ 71] */ new RandomConstant("85d0c629e6c7de000000000000000001",
"58ca25feb8d4bf800000000000000000"),
/* [ 72] */ new RandomConstant("0ba18c53cd8fbc000000000000000001",
"b1944bfd71a97f000000000000000000"),
/* [ 73] */ new RandomConstant("174318a79b1f78000000000000000001",
"632897fae352fe000000000000000000"),
/* [ 74] */ new RandomConstant("2e86314f363ef0000000000000000001",
"c6512ff5c6a5fc000000000000000000"),
/* [ 75] */ new RandomConstant("5d0c629e6c7de0000000000000000001",
"8ca25feb8d4bf8000000000000000000"),
/* [ 76] */ new RandomConstant("ba18c53cd8fbc0000000000000000001",
"1944bfd71a97f0000000000000000000"),
/* [ 77] */ new RandomConstant("74318a79b1f780000000000000000001",
"32897fae352fe0000000000000000000"),
/* [ 78] */ new RandomConstant("e86314f363ef00000000000000000001",
"6512ff5c6a5fc0000000000000000000"),
/* [ 79] */ new RandomConstant("d0c629e6c7de00000000000000000001",
"ca25feb8d4bf80000000000000000000"),
/* [ 80] */ new RandomConstant("a18c53cd8fbc00000000000000000001",
"944bfd71a97f00000000000000000000"),
/* [ 81] */ new RandomConstant("4318a79b1f7800000000000000000001",
"2897fae352fe00000000000000000000"),
/* [ 82] */ new RandomConstant("86314f363ef000000000000000000001",
"512ff5c6a5fc00000000000000000000"),
/* [ 83] */ new RandomConstant("0c629e6c7de000000000000000000001",
"a25feb8d4bf800000000000000000000"),
/* [ 84] */ new RandomConstant("18c53cd8fbc000000000000000000001",
"44bfd71a97f000000000000000000000"),
/* [ 85] */ new RandomConstant("318a79b1f78000000000000000000001",
"897fae352fe000000000000000000000"),
/* [ 86] */ new RandomConstant("6314f363ef0000000000000000000001",
"12ff5c6a5fc000000000000000000000"),
/* [ 87] */ new RandomConstant("c629e6c7de0000000000000000000001",
"25feb8d4bf8000000000000000000000"),
/* [ 88] */ new RandomConstant("8c53cd8fbc0000000000000000000001",
"4bfd71a97f0000000000000000000000"),
/* [ 89] */ new RandomConstant("18a79b1f780000000000000000000001",
"97fae352fe0000000000000000000000"),
/* [ 90] */ new RandomConstant("314f363ef00000000000000000000001",
"2ff5c6a5fc0000000000000000000000"),
/* [ 91] */ new RandomConstant("629e6c7de00000000000000000000001",
"5feb8d4bf80000000000000000000000"),
/* [ 92] */ new RandomConstant("c53cd8fbc00000000000000000000001",
"bfd71a97f00000000000000000000000"),
/* [ 93] */ new RandomConstant("8a79b1f7800000000000000000000001",
"7fae352fe00000000000000000000000"),
/* [ 94] */ new RandomConstant("14f363ef000000000000000000000001",
"ff5c6a5fc00000000000000000000000"),
/* [ 95] */ new RandomConstant("29e6c7de000000000000000000000001",
"feb8d4bf800000000000000000000000"),
/* [ 96] */ new RandomConstant("53cd8fbc000000000000000000000001",
"fd71a97f000000000000000000000000"),
/* [ 97] */ new RandomConstant("a79b1f78000000000000000000000001",
"fae352fe000000000000000000000000"),
/* [ 98] */ new RandomConstant("4f363ef0000000000000000000000001",
"f5c6a5fc000000000000000000000000"),
/* [ 99] */ new RandomConstant("9e6c7de0000000000000000000000001",
"eb8d4bf8000000000000000000000000"),
/* [100] */ new RandomConstant("3cd8fbc0000000000000000000000001",
"d71a97f0000000000000000000000000"),
/* [101] */ new RandomConstant("79b1f780000000000000000000000001",
"ae352fe0000000000000000000000000"),
/* [102] */ new RandomConstant("f363ef00000000000000000000000001",
"5c6a5fc0000000000000000000000000"),
/* [103] */ new RandomConstant("e6c7de00000000000000000000000001",
"b8d4bf80000000000000000000000000"),
/* [104] */ new RandomConstant("cd8fbc00000000000000000000000001",
"71a97f00000000000000000000000000"),
/* [105] */ new RandomConstant("9b1f7800000000000000000000000001",
"e352fe00000000000000000000000000"),
/* [106] */ new RandomConstant("363ef000000000000000000000000001",
"c6a5fc00000000000000000000000000"),
/* [107] */ new RandomConstant("6c7de000000000000000000000000001",
"8d4bf800000000000000000000000000"),
/* [108] */ new RandomConstant("d8fbc000000000000000000000000001",
"1a97f000000000000000000000000000"),
/* [109] */ new RandomConstant("b1f78000000000000000000000000001",
"352fe000000000000000000000000000"),
/* [110] */ new RandomConstant("63ef0000000000000000000000000001",
"6a5fc000000000000000000000000000"),
/* [111] */ new RandomConstant("c7de0000000000000000000000000001",
"d4bf8000000000000000000000000000"),
/* [112] */ new RandomConstant("8fbc0000000000000000000000000001",
"a97f0000000000000000000000000000"),
/* [113] */ new RandomConstant("1f780000000000000000000000000001",
"52fe0000000000000000000000000000"),
/* [114] */ new RandomConstant("3ef00000000000000000000000000001",
"a5fc0000000000000000000000000000"),
/* [115] */ new RandomConstant("7de00000000000000000000000000001",
"4bf80000000000000000000000000000"),
/* [116] */ new RandomConstant("fbc00000000000000000000000000001",
"97f00000000000000000000000000000"),
/* [117] */ new RandomConstant("f7800000000000000000000000000001",
"2fe00000000000000000000000000000"),
/* [118] */ new RandomConstant("ef000000000000000000000000000001",
"5fc00000000000000000000000000000"),
/* [119] */ new RandomConstant("de000000000000000000000000000001",
"bf800000000000000000000000000000"),
/* [120] */ new RandomConstant("bc000000000000000000000000000001",
"7f000000000000000000000000000000"),
/* [121] */ new RandomConstant("78000000000000000000000000000001",
"fe000000000000000000000000000000"),
/* [122] */ new RandomConstant("f0000000000000000000000000000001",
"fc000000000000000000000000000000"),
/* [123] */ new RandomConstant("e0000000000000000000000000000001",
"f8000000000000000000000000000000"),
/* [124] */ new RandomConstant("c0000000000000000000000000000001",
"f0000000000000000000000000000000"),
/* [125] */ new RandomConstant("80000000000000000000000000000001",
"e0000000000000000000000000000000"),
/* [126] */ new RandomConstant("00000000000000000000000000000001",
"c0000000000000000000000000000000"),
/* [127] */ new RandomConstant("00000000000000000000000000000001",
"80000000000000000000000000000000")};
/**
* Generate the random number that is "advance" steps
* from an initial random number of 0. This is done by
* starting with 0, and then advancing it by the
* appropriate powers of 2 of the linear congruential
* generator.
*/
public static Unsigned16 skipAhead(Unsigned16 advance) {
Unsigned16 result = new Unsigned16();
long bit_map;
bit_map = advance.getLow8();
for (int i = 0; bit_map != 0 && i < 64; i++) {
if ((bit_map & (1L << i)) != 0) {
/* advance random number by f**(2**i) (x)
*/
result.multiply(genArray[i].a);
result.add(genArray[i].c);
bit_map &= ~(1L << i);
}
}
bit_map = advance.getHigh8();
for (int i = 0; bit_map != 0 && i < 64; i++)
{
if ((bit_map & (1L << i)) != 0) {
/* advance random number by f**(2**(i + 64)) (x)
*/
result.multiply(genArray[i+64].a);
result.add(genArray[i+64].c);
bit_map &= ~(1L << i);
}
}
return result;
}
/**
* Generate the next 16 byte random number.
*/
public static void nextRand(Unsigned16 rand) {
/* advance the random number forward once using the linear congruential
* generator, and then return the new random number
*/
rand.multiply(genArray[0].a);
rand.add(genArray[0].c);
}
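  // Equivalence sketch (follows from the class contract): skipping ahead N
  // steps from the implicit seed of 0 matches N sequential nextRand calls:
  //   Unsigned16 a = Random16.skipAhead(new Unsigned16(3));
  //   Unsigned16 b = new Unsigned16();          // seed 0
  //   for (int i = 0; i < 3; ++i) { Random16.nextRand(b); }
  //   // a.equals(b) is expected to hold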
}
| 22,566 | 59.178667 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.terasort;
import java.io.*;
import java.util.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
import com.google.common.base.Charsets;
class TeraScheduler {
private static final Log LOG = LogFactory.getLog(TeraScheduler.class);
private Split[] splits;
private List<Host> hosts = new ArrayList<Host>();
private int slotsPerHost;
private int remainingSplits = 0;
private FileSplit[] realSplits = null;
static class Split {
String filename;
boolean isAssigned = false;
List<Host> locations = new ArrayList<Host>();
Split(String filename) {
this.filename = filename;
}
public String toString() {
StringBuffer result = new StringBuffer();
result.append(filename);
result.append(" on ");
for(Host host: locations) {
result.append(host.hostname);
result.append(", ");
}
return result.toString();
}
}
static class Host {
String hostname;
List<Split> splits = new ArrayList<Split>();
Host(String hostname) {
this.hostname = hostname;
}
public String toString() {
StringBuffer result = new StringBuffer();
result.append(splits.size());
result.append(" ");
result.append(hostname);
return result.toString();
}
}
List<String> readFile(String filename) throws IOException {
List<String> result = new ArrayList<String>(10000);
BufferedReader in = new BufferedReader(
new InputStreamReader(new FileInputStream(filename), Charsets.UTF_8));
String line = in.readLine();
while (line != null) {
result.add(line);
line = in.readLine();
}
in.close();
return result;
}
public TeraScheduler(String splitFilename,
String nodeFilename) throws IOException {
slotsPerHost = 4;
// get the hosts
Map<String, Host> hostIds = new HashMap<String,Host>();
for(String hostName: readFile(nodeFilename)) {
Host host = new Host(hostName);
hosts.add(host);
hostIds.put(hostName, host);
}
// read the blocks
List<String> splitLines = readFile(splitFilename);
splits = new Split[splitLines.size()];
remainingSplits = 0;
for(String line: splitLines) {
StringTokenizer itr = new StringTokenizer(line);
Split newSplit = new Split(itr.nextToken());
splits[remainingSplits++] = newSplit;
while (itr.hasMoreTokens()) {
Host host = hostIds.get(itr.nextToken());
newSplit.locations.add(host);
host.splits.add(newSplit);
}
}
}
public TeraScheduler(FileSplit[] realSplits,
Configuration conf) throws IOException {
this.realSplits = realSplits;
this.slotsPerHost = conf.getInt(TTConfig.TT_MAP_SLOTS, 4);
Map<String, Host> hostTable = new HashMap<String, Host>();
splits = new Split[realSplits.length];
for(FileSplit realSplit: realSplits) {
Split split = new Split(realSplit.getPath().toString());
splits[remainingSplits++] = split;
for(String hostname: realSplit.getLocations()) {
Host host = hostTable.get(hostname);
if (host == null) {
host = new Host(hostname);
hostTable.put(hostname, host);
hosts.add(host);
}
host.splits.add(split);
split.locations.add(host);
}
}
}
Host pickBestHost() {
Host result = null;
int splits = Integer.MAX_VALUE;
for(Host host: hosts) {
if (host.splits.size() < splits) {
result = host;
splits = host.splits.size();
}
}
if (result != null) {
hosts.remove(result);
LOG.debug("picking " + result);
}
return result;
}
void pickBestSplits(Host host) {
int tasksToPick = Math.min(slotsPerHost,
(int) Math.ceil((double) remainingSplits /
hosts.size()));
Split[] best = new Split[tasksToPick];
for(Split cur: host.splits) {
LOG.debug(" examine: " + cur.filename + " " + cur.locations.size());
int i = 0;
while (i < tasksToPick && best[i] != null &&
best[i].locations.size() <= cur.locations.size()) {
i += 1;
}
if (i < tasksToPick) {
for(int j = tasksToPick - 1; j > i; --j) {
best[j] = best[j-1];
}
best[i] = cur;
}
}
// for the chosen blocks, remove them from the other locations
for(int i=0; i < tasksToPick; ++i) {
if (best[i] != null) {
LOG.debug(" best: " + best[i].filename);
for (Host other: best[i].locations) {
other.splits.remove(best[i]);
}
best[i].locations.clear();
best[i].locations.add(host);
best[i].isAssigned = true;
remainingSplits -= 1;
}
}
// for the non-chosen blocks, remove this host
for(Split cur: host.splits) {
if (!cur.isAssigned) {
cur.locations.remove(host);
}
}
}
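  // Worked example (hypothetical numbers): with slotsPerHost = 4,
  // remainingSplits = 10 and 3 hosts left, tasksToPick =
  // min(4, ceil(10 / 3.0)) = 4, so up to 4 of this host's most
  // location-constrained splits are pinned to it.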
void solve() throws IOException {
Host host = pickBestHost();
while (host != null) {
pickBestSplits(host);
host = pickBestHost();
}
}
/**
* Solve the schedule and modify the FileSplit array to reflect the new
* schedule. It will move placed splits to the front and unplaceable splits
* to the end.
* @return a new list of FileSplits that are modified to have the
* best host as the only host.
* @throws IOException
*/
public List<InputSplit> getNewFileSplits() throws IOException {
solve();
FileSplit[] result = new FileSplit[realSplits.length];
int left = 0;
int right = realSplits.length - 1;
for(int i=0; i < splits.length; ++i) {
if (splits[i].isAssigned) {
// copy the split and fix up the locations
String[] newLocations = {splits[i].locations.get(0).hostname};
realSplits[i] = new FileSplit(realSplits[i].getPath(),
realSplits[i].getStart(), realSplits[i].getLength(), newLocations);
result[left++] = realSplits[i];
} else {
result[right--] = realSplits[i];
}
}
List<InputSplit> ret = new ArrayList<InputSplit>();
for (FileSplit fs : result) {
ret.add(fs);
}
return ret;
}
public static void main(String[] args) throws IOException {
TeraScheduler problem = new TeraScheduler("block-loc.txt", "nodes");
for(Host host: problem.hosts) {
System.out.println(host);
}
LOG.info("starting solve");
problem.solve();
List<Split> leftOvers = new ArrayList<Split>();
for(int i=0; i < problem.splits.length; ++i) {
if (problem.splits[i].isAssigned) {
System.out.println("sched: " + problem.splits[i]);
} else {
leftOvers.add(problem.splits[i]);
}
}
for(Split cur: leftOvers) {
System.out.println("left: " + cur);
}
System.out.println("left over: " + leftOvers.size());
LOG.info("done");
}
}
| 8,035 | 30.762846 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/Unsigned16.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.terasort;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
/**
* An unsigned 16 byte integer class that supports addition, multiplication,
* and left shifts.
*/
class Unsigned16 implements Writable {
private long hi8;
private long lo8;
public Unsigned16() {
hi8 = 0;
lo8 = 0;
}
public Unsigned16(long l) {
hi8 = 0;
lo8 = l;
}
public Unsigned16(Unsigned16 other) {
hi8 = other.hi8;
lo8 = other.lo8;
}
@Override
public boolean equals(Object o) {
if (o instanceof Unsigned16) {
Unsigned16 other = (Unsigned16) o;
return other.hi8 == hi8 && other.lo8 == lo8;
}
return false;
}
@Override
public int hashCode() {
return (int) lo8;
}
/**
* Parse a hex string
* @param s the hex string
*/
public Unsigned16(String s) throws NumberFormatException {
set(s);
}
/**
* Set the number from a hex string
* @param s the number in hexadecimal
* @throws NumberFormatException if the number is invalid
*/
public void set(String s) throws NumberFormatException {
hi8 = 0;
lo8 = 0;
final long lastDigit = 0xfl << 60;
for (int i = 0; i < s.length(); ++i) {
int digit = getHexDigit(s.charAt(i));
if ((lastDigit & hi8) != 0) {
throw new NumberFormatException(s + " overflowed 16 bytes");
}
hi8 <<= 4;
hi8 |= (lo8 & lastDigit) >>> 60;
lo8 <<= 4;
lo8 |= digit;
}
}
/**
* Set the number to a given long.
* @param l the new value, which is treated as an unsigned number
*/
public void set(long l) {
lo8 = l;
hi8 = 0;
}
/**
* Map a hexadecimal character into a digit.
* @param ch the character
* @return the digit from 0 to 15
* @throws NumberFormatException
*/
private static int getHexDigit(char ch) throws NumberFormatException {
if (ch >= '0' && ch <= '9') {
return ch - '0';
}
if (ch >= 'a' && ch <= 'f') {
return ch - 'a' + 10;
}
if (ch >= 'A' && ch <= 'F') {
return ch - 'A' + 10;
}
throw new NumberFormatException(ch + " is not a valid hex digit");
}
private static final Unsigned16 TEN = new Unsigned16(10);
public static Unsigned16 fromDecimal(String s) throws NumberFormatException {
Unsigned16 result = new Unsigned16();
Unsigned16 tmp = new Unsigned16();
for(int i=0; i < s.length(); i++) {
char ch = s.charAt(i);
if (ch < '0' || ch > '9') {
throw new NumberFormatException(ch + " not a valid decimal digit");
}
int digit = ch - '0';
result.multiply(TEN);
tmp.set(digit);
result.add(tmp);
}
return result;
}
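  // Worked example: fromDecimal("255") builds the value digit by digit
  // (2, then 2*10+5=25, then 25*10+5=255), so its toString() is "ff".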
/**
* Return the number as a hex string.
*/
public String toString() {
if (hi8 == 0) {
return Long.toHexString(lo8);
} else {
StringBuilder result = new StringBuilder();
result.append(Long.toHexString(hi8));
String loString = Long.toHexString(lo8);
for(int i=loString.length(); i < 16; ++i) {
result.append('0');
}
result.append(loString);
return result.toString();
}
}
/**
* Get a given byte from the number.
* @param b the byte to get with 0 meaning the most significant byte
* @return the byte or 0 if b is outside of 0..15
*/
public byte getByte(int b) {
if (b >= 0 && b < 16) {
if (b < 8) {
return (byte) (hi8 >> (56 - 8*b));
} else {
return (byte) (lo8 >> (120 - 8*b));
}
}
return 0;
}
/**
* Get the hexadecimal digit at the given position.
* @param p the digit position to get with 0 meaning the most significant
* @return the character or '0' if p is outside of 0..31
*/
public char getHexDigit(int p) {
byte digit = getByte(p / 2);
if (p % 2 == 0) {
digit >>>= 4;
}
digit &= 0xf;
if (digit < 10) {
return (char) ('0' + digit);
} else {
return (char) ('A' + digit - 10);
}
}
/**
* Get the high 8 bytes as a long.
*/
public long getHigh8() {
return hi8;
}
/**
* Get the low 8 bytes as a long.
*/
public long getLow8() {
return lo8;
}
/**
* Multiply the current number by a 16 byte unsigned integer. Overflow is not
* detected and only the low 16 bytes of the product are kept. The numbers
* are divided into 32 and 31 bit chunks so that the product of two chunks
* fits in the unsigned 63 bits of a long.
* @param b the other number
*/
void multiply(Unsigned16 b) {
// divide the left into 4 32 bit chunks
long[] left = new long[4];
left[0] = lo8 & 0xffffffffl;
left[1] = lo8 >>> 32;
left[2] = hi8 & 0xffffffffl;
left[3] = hi8 >>> 32;
// divide the right into 5 31 bit chunks
long[] right = new long[5];
right[0] = b.lo8 & 0x7fffffffl;
right[1] = (b.lo8 >>> 31) & 0x7fffffffl;
right[2] = (b.lo8 >>> 62) + ((b.hi8 & 0x1fffffffl) << 2);
right[3] = (b.hi8 >>> 29) & 0x7fffffffl;
right[4] = (b.hi8 >>> 60);
// clear the cur value
set(0);
Unsigned16 tmp = new Unsigned16();
for(int l=0; l < 4; ++l) {
for (int r=0; r < 5; ++r) {
long prod = left[l] * right[r];
if (prod != 0) {
int off = l*32 + r*31;
tmp.set(prod);
tmp.shiftLeft(off);
add(tmp);
}
}
}
}
/**
* Add the given number into the current number.
* @param b the other number
*/
public void add(Unsigned16 b) {
long sumHi;
long sumLo;
long reshibit, hibit0, hibit1;
sumHi = hi8 + b.hi8;
hibit0 = (lo8 & 0x8000000000000000L);
hibit1 = (b.lo8 & 0x8000000000000000L);
sumLo = lo8 + b.lo8;
reshibit = (sumLo & 0x8000000000000000L);
if ((hibit0 & hibit1) != 0 | ((hibit0 ^ hibit1) != 0 && reshibit == 0))
sumHi++; /* add carry bit */
hi8 = sumHi;
lo8 = sumLo;
}
/**
* Shift the number left by a given number of bit positions. Only the
* low-order 128 bits of the result are kept.
* @param bits the bit positions to shift by
*/
public void shiftLeft(int bits) {
if (bits != 0) {
if (bits < 64) {
hi8 <<= bits;
hi8 |= (lo8 >>> (64 - bits));
lo8 <<= bits;
} else if (bits < 128) {
hi8 = lo8 << (bits - 64);
lo8 = 0;
} else {
hi8 = 0;
lo8 = 0;
}
}
}
@Override
public void readFields(DataInput in) throws IOException {
hi8 = in.readLong();
lo8 = in.readLong();
}
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(hi8);
out.writeLong(lo8);
}
}
| 7,526 | 24.258389 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.terasort;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.hadoop.util.IndexedSortable;
import org.apache.hadoop.util.QuickSort;
import org.apache.hadoop.util.StringUtils;
/**
* An input format that reads the first 10 characters of each line as the key
* and the rest of the line as the value. Both key and value are represented
* as Text.
*/
public class TeraInputFormat extends FileInputFormat<Text,Text> {
static final String PARTITION_FILENAME = "_partition.lst";
static final int KEY_LENGTH = 10;
static final int VALUE_LENGTH = 90;
static final int RECORD_LENGTH = KEY_LENGTH + VALUE_LENGTH;
private static MRJobConfig lastContext = null;
private static List<InputSplit> lastResult = null;
static class TextSampler implements IndexedSortable {
private ArrayList<Text> records = new ArrayList<Text>();
public int compare(int i, int j) {
Text left = records.get(i);
Text right = records.get(j);
return left.compareTo(right);
}
public void swap(int i, int j) {
Text left = records.get(i);
Text right = records.get(j);
records.set(j, left);
records.set(i, right);
}
public void addKey(Text key) {
synchronized (this) {
records.add(new Text(key));
}
}
/**
* Find the split points for a given sample. The sample keys are sorted
* and down sampled to find even split points for the partitions. The
* returned keys should be the start of their respective partitions.
* @param numPartitions the desired number of partitions
* @return an array of size numPartitions - 1 that holds the split points
*/
Text[] createPartitions(int numPartitions) {
int numRecords = records.size();
System.out.println("Making " + numPartitions + " from " + numRecords +
" sampled records");
if (numPartitions > numRecords) {
throw new IllegalArgumentException
("Requested more partitions than input keys (" + numPartitions +
" > " + numRecords + ")");
}
new QuickSort().sort(this, 0, records.size());
float stepSize = numRecords / (float) numPartitions;
Text[] result = new Text[numPartitions-1];
for(int i=1; i < numPartitions; ++i) {
result[i-1] = records.get(Math.round(stepSize * i));
}
return result;
}
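    // Worked example: with 100,000 sampled records and numPartitions = 4,
    // stepSize = 25,000 and the returned split points are the sorted records
    // at indices 25,000, 50,000 and 75,000.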
}
/**
* Use the input splits to take samples of the input and generate sample
* keys. By default reads 100,000 keys from 10 locations in the input, sorts
* them and picks N-1 keys to generate N equally sized partitions.
* @param job the job to sample
* @param partFile where to write the output file to
* @throws Throwable if something goes wrong
*/
public static void writePartitionFile(final JobContext job,
Path partFile) throws Throwable {
long t1 = System.currentTimeMillis();
Configuration conf = job.getConfiguration();
final TeraInputFormat inFormat = new TeraInputFormat();
final TextSampler sampler = new TextSampler();
int partitions = job.getNumReduceTasks();
long sampleSize =
conf.getLong(TeraSortConfigKeys.SAMPLE_SIZE.key(),
TeraSortConfigKeys.DEFAULT_SAMPLE_SIZE);
final List<InputSplit> splits = inFormat.getSplits(job);
long t2 = System.currentTimeMillis();
System.out.println("Computing input splits took " + (t2 - t1) + "ms");
int samples =
Math.min(conf.getInt(TeraSortConfigKeys.NUM_PARTITIONS.key(),
TeraSortConfigKeys.DEFAULT_NUM_PARTITIONS),
splits.size());
System.out.println("Sampling " + samples + " splits of " + splits.size());
final long recordsPerSample = sampleSize / samples;
final int sampleStep = splits.size() / samples;
Thread[] samplerReader = new Thread[samples];
SamplerThreadGroup threadGroup = new SamplerThreadGroup("Sampler Reader Thread Group");
// take N samples from different parts of the input
for(int i=0; i < samples; ++i) {
final int idx = i;
samplerReader[i] =
new Thread (threadGroup,"Sampler Reader " + idx) {
{
setDaemon(true);
}
public void run() {
long records = 0;
try {
TaskAttemptContext context = new TaskAttemptContextImpl(
job.getConfiguration(), new TaskAttemptID());
RecordReader<Text, Text> reader =
inFormat.createRecordReader(splits.get(sampleStep * idx),
context);
reader.initialize(splits.get(sampleStep * idx), context);
while (reader.nextKeyValue()) {
sampler.addKey(new Text(reader.getCurrentKey()));
records += 1;
if (recordsPerSample <= records) {
break;
}
}
} catch (IOException ie){
System.err.println("Got an exception while reading splits " +
StringUtils.stringifyException(ie));
throw new RuntimeException(ie);
} catch (InterruptedException e) {
}
}
};
samplerReader[i].start();
}
FileSystem outFs = partFile.getFileSystem(conf);
DataOutputStream writer = outFs.create(partFile, true, 64*1024, (short) 10,
outFs.getDefaultBlockSize(partFile));
for (int i = 0; i < samples; i++) {
try {
samplerReader[i].join();
if(threadGroup.getThrowable() != null){
throw threadGroup.getThrowable();
}
} catch (InterruptedException e) {
}
}
for(Text split : sampler.createPartitions(partitions)) {
split.write(writer);
}
writer.close();
long t3 = System.currentTimeMillis();
System.out.println("Computing parititions took " + (t3 - t2) + "ms");
}
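  // Sampling arithmetic with the defaults (see TeraSortConfigKeys):
  // samples = min(10, #splits) and sampleSize = 100,000, so with at least 10
  // input splits each sampler thread reads about 10,000 keys before stopping.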
static class SamplerThreadGroup extends ThreadGroup{
private Throwable throwable;
public SamplerThreadGroup(String s) {
super(s);
}
@Override
public void uncaughtException(Thread thread, Throwable throwable) {
this.throwable = throwable;
}
public Throwable getThrowable() {
return this.throwable;
}
}
static class TeraRecordReader extends RecordReader<Text,Text> {
private FSDataInputStream in;
private long offset;
private long length;
private static final int RECORD_LENGTH = KEY_LENGTH + VALUE_LENGTH;
private byte[] buffer = new byte[RECORD_LENGTH];
private Text key;
private Text value;
public TeraRecordReader() throws IOException {
}
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
Path p = ((FileSplit)split).getPath();
FileSystem fs = p.getFileSystem(context.getConfiguration());
in = fs.open(p);
long start = ((FileSplit)split).getStart();
// find the offset to start at a record boundary
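      // e.g. with 100-byte records, a split starting at byte 250 gets offset 50
      // and reading begins at byte 300, the next record boundary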
offset = (RECORD_LENGTH - (start % RECORD_LENGTH)) % RECORD_LENGTH;
in.seek(start + offset);
length = ((FileSplit)split).getLength();
}
public void close() throws IOException {
in.close();
}
public Text getCurrentKey() {
return key;
}
public Text getCurrentValue() {
return value;
}
public float getProgress() throws IOException {
return (float) offset / length;
}
public boolean nextKeyValue() throws IOException {
if (offset >= length) {
return false;
}
int read = 0;
while (read < RECORD_LENGTH) {
long newRead = in.read(buffer, read, RECORD_LENGTH - read);
if (newRead == -1) {
if (read == 0) {
return false;
} else {
throw new EOFException("read past eof");
}
}
read += newRead;
}
if (key == null) {
key = new Text();
}
if (value == null) {
value = new Text();
}
key.set(buffer, 0, KEY_LENGTH);
value.set(buffer, KEY_LENGTH, VALUE_LENGTH);
offset += RECORD_LENGTH;
return true;
}
}
@Override
public RecordReader<Text, Text>
createRecordReader(InputSplit split, TaskAttemptContext context)
throws IOException {
return new TeraRecordReader();
}
@Override
public List<InputSplit> getSplits(JobContext job) throws IOException {
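    // cache the splits computed for the last JobContext so repeated calls
    // (e.g. during sampling and job submission) do not redo the work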
if (job == lastContext) {
return lastResult;
}
long t1, t2, t3;
t1 = System.currentTimeMillis();
lastContext = job;
lastResult = super.getSplits(job);
t2 = System.currentTimeMillis();
System.out.println("Spent " + (t2 - t1) + "ms computing base-splits.");
if (job.getConfiguration().getBoolean(TeraSortConfigKeys.USE_TERA_SCHEDULER.key(),
TeraSortConfigKeys.DEFAULT_USE_TERA_SCHEDULER)) {
TeraScheduler scheduler = new TeraScheduler(
lastResult.toArray(new FileSplit[0]), job.getConfiguration());
lastResult = scheduler.getNewFileSplits();
t3 = System.currentTimeMillis();
System.out.println("Spent " + (t3 - t2) + "ms computing TeraScheduler splits.");
}
return lastResult;
}
}
| 10,850 | 34.116505 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.terasort;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.net.URI;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* Generates the sampled split points, launches the job, and waits for it to
* finish.
* <p>
* To run the program:
* <b>bin/hadoop jar hadoop-*-examples.jar terasort in-dir out-dir</b>
*/
public class TeraSort extends Configured implements Tool {
private static final Log LOG = LogFactory.getLog(TeraSort.class);
/**
* A partitioner that splits text keys into roughly equal partitions
* in a global sorted order.
*/
static class TotalOrderPartitioner extends Partitioner<Text,Text>
implements Configurable {
private TrieNode trie;
private Text[] splitPoints;
private Configuration conf;
/**
* A generic trie node
*/
static abstract class TrieNode {
private int level;
TrieNode(int level) {
this.level = level;
}
abstract int findPartition(Text key);
abstract void print(PrintStream strm) throws IOException;
int getLevel() {
return level;
}
}
/**
* An inner trie node that contains 256 children based on the next
* character.
*/
static class InnerTrieNode extends TrieNode {
private TrieNode[] child = new TrieNode[256];
InnerTrieNode(int level) {
super(level);
}
int findPartition(Text key) {
int level = getLevel();
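        // a key that has no byte at this depth falls into the left-most child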
if (key.getLength() <= level) {
return child[0].findPartition(key);
}
return child[key.getBytes()[level] & 0xff].findPartition(key);
}
void setChild(int idx, TrieNode child) {
this.child[idx] = child;
}
void print(PrintStream strm) throws IOException {
for(int ch=0; ch < 256; ++ch) {
for(int i = 0; i < 2*getLevel(); ++i) {
strm.print(' ');
}
strm.print(ch);
strm.println(" ->");
if (child[ch] != null) {
child[ch].print(strm);
}
}
}
}
/**
* A leaf trie node that does string compares to figure out where the given
* key belongs between lower..upper.
*/
static class LeafTrieNode extends TrieNode {
int lower;
int upper;
Text[] splitPoints;
LeafTrieNode(int level, Text[] splitPoints, int lower, int upper) {
super(level);
this.splitPoints = splitPoints;
this.lower = lower;
this.upper = upper;
}
int findPartition(Text key) {
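        // linear scan: the partition is the index of the first split point
        // that is greater than the key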
for(int i=lower; i<upper; ++i) {
if (splitPoints[i].compareTo(key) > 0) {
return i;
}
}
return upper;
}
void print(PrintStream strm) throws IOException {
for(int i = 0; i < 2*getLevel(); ++i) {
strm.print(' ');
}
strm.print(lower);
strm.print(", ");
strm.println(upper);
}
}
/**
* Read the cut points from the given sequence file.
* @param fs the file system
* @param p the path to read
* @param conf the job config
* @return the strings to split the partitions on
* @throws IOException
*/
private static Text[] readPartitions(FileSystem fs, Path p,
Configuration conf) throws IOException {
int reduces = conf.getInt(MRJobConfig.NUM_REDUCES, 1);
Text[] result = new Text[reduces - 1];
DataInputStream reader = fs.open(p);
for(int i=0; i < reduces - 1; ++i) {
result[i] = new Text();
result[i].readFields(reader);
}
reader.close();
return result;
}
/**
* Given a sorted set of cut points, build a trie that will find the correct
* partition quickly.
* @param splits the list of cut points
* @param lower the lower bound of partitions 0..numPartitions-1
* @param upper the upper bound of partitions 0..numPartitions-1
* @param prefix the prefix that we have already checked against
* @param maxDepth the maximum depth we will build a trie for
* @return the trie node that will divide the splits correctly
*/
private static TrieNode buildTrie(Text[] splits, int lower, int upper,
Text prefix, int maxDepth) {
int depth = prefix.getLength();
if (depth >= maxDepth || lower == upper) {
return new LeafTrieNode(depth, splits, lower, upper);
}
InnerTrieNode result = new InnerTrieNode(depth);
Text trial = new Text(prefix);
// append an extra byte on to the prefix
trial.append(new byte[1], 0, 1);
int currentBound = lower;
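      // for each byte value ch, advance currentBound past every split point that
      // sorts before prefix+(ch+1), then build the child covering [lower, currentBound)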
for(int ch = 0; ch < 255; ++ch) {
trial.getBytes()[depth] = (byte) (ch + 1);
lower = currentBound;
while (currentBound < upper) {
if (splits[currentBound].compareTo(trial) >= 0) {
break;
}
currentBound += 1;
}
trial.getBytes()[depth] = (byte) ch;
result.child[ch] = buildTrie(splits, lower, currentBound, trial,
maxDepth);
}
// pick up the rest
trial.getBytes()[depth] = (byte) 255;
result.child[255] = buildTrie(splits, currentBound, upper, trial,
maxDepth);
return result;
}
public void setConf(Configuration conf) {
try {
FileSystem fs = FileSystem.getLocal(conf);
this.conf = conf;
Path partFile = new Path(TeraInputFormat.PARTITION_FILENAME);
splitPoints = readPartitions(fs, partFile, conf);
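        // a depth-2 trie branches on at most the first two key bytes; longer
        // comparisons are delegated to LeafTrieNode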
trie = buildTrie(splitPoints, 0, splitPoints.length, new Text(), 2);
} catch (IOException ie) {
throw new IllegalArgumentException("can't read partitions file", ie);
}
}
public Configuration getConf() {
return conf;
}
public TotalOrderPartitioner() {
}
public int getPartition(Text key, Text value, int numPartitions) {
return trie.findPartition(key);
}
}
/**
* A total order partitioner that assigns keys based on their first
* PREFIX_LENGTH bytes, assuming a flat distribution.
*/
public static class SimplePartitioner extends Partitioner<Text, Text>
implements Configurable {
int prefixesPerReduce;
private static final int PREFIX_LENGTH = 3;
private Configuration conf = null;
public void setConf(Configuration conf) {
this.conf = conf;
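      // divide the 2^(8*PREFIX_LENGTH) possible prefixes evenly among the
      // reducers, rounding up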
prefixesPerReduce = (int) Math.ceil((1 << (8 * PREFIX_LENGTH)) /
(float) conf.getInt(MRJobConfig.NUM_REDUCES, 1));
}
public Configuration getConf() {
return conf;
}
@Override
public int getPartition(Text key, Text value, int numPartitions) {
byte[] bytes = key.getBytes();
int len = Math.min(PREFIX_LENGTH, key.getLength());
int prefix = 0;
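      // pack the first PREFIX_LENGTH bytes of the key into an int, big-endian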
for(int i=0; i < len; ++i) {
prefix = (prefix << 8) | (0xff & bytes[i]);
}
return prefix / prefixesPerReduce;
}
}
public static boolean getUseSimplePartitioner(JobContext job) {
return job.getConfiguration().getBoolean(
TeraSortConfigKeys.USE_SIMPLE_PARTITIONER.key(),
TeraSortConfigKeys.DEFAULT_USE_SIMPLE_PARTITIONER);
}
public static void setUseSimplePartitioner(Job job, boolean value) {
job.getConfiguration().setBoolean(
TeraSortConfigKeys.USE_SIMPLE_PARTITIONER.key(), value);
}
public static int getOutputReplication(JobContext job) {
return job.getConfiguration().getInt(
TeraSortConfigKeys.OUTPUT_REPLICATION.key(),
TeraSortConfigKeys.DEFAULT_OUTPUT_REPLICATION);
}
public static void setOutputReplication(Job job, int value) {
job.getConfiguration().setInt(TeraSortConfigKeys.OUTPUT_REPLICATION.key(),
value);
}
private static void usage() throws IOException {
System.err.println("Usage: terasort [-Dproperty=value] <in> <out>");
System.err.println("TeraSort configurations are:");
for (TeraSortConfigKeys teraSortConfigKeys : TeraSortConfigKeys.values()) {
System.err.println(teraSortConfigKeys.toString());
}
}
public int run(String[] args) throws Exception {
if (args.length != 2) {
usage();
return 2;
}
LOG.info("starting");
Job job = Job.getInstance(getConf());
Path inputDir = new Path(args[0]);
Path outputDir = new Path(args[1]);
boolean useSimplePartitioner = getUseSimplePartitioner(job);
TeraInputFormat.setInputPaths(job, inputDir);
FileOutputFormat.setOutputPath(job, outputDir);
job.setJobName("TeraSort");
job.setJarByClass(TeraSort.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setInputFormatClass(TeraInputFormat.class);
job.setOutputFormatClass(TeraOutputFormat.class);
if (useSimplePartitioner) {
job.setPartitionerClass(SimplePartitioner.class);
} else {
long start = System.currentTimeMillis();
Path partitionFile = new Path(outputDir,
TeraInputFormat.PARTITION_FILENAME);
URI partitionUri = new URI(partitionFile.toString() +
"#" + TeraInputFormat.PARTITION_FILENAME);
try {
TeraInputFormat.writePartitionFile(job, partitionFile);
} catch (Throwable e) {
LOG.error(e.getMessage());
return -1;
}
job.addCacheFile(partitionUri);
long end = System.currentTimeMillis();
System.out.println("Spent " + (end - start) + "ms computing partitions.");
job.setPartitionerClass(TotalOrderPartitioner.class);
}
job.getConfiguration().setInt("dfs.replication", getOutputReplication(job));
int ret = job.waitForCompletion(true) ? 0 : 1;
LOG.info("done");
return ret;
}
/**
* @param args
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new TeraSort(), args);
System.exit(res);
}
}
| 11,488 | 32.301449 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraChecksum.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.terasort;
import java.io.IOException;
import java.util.zip.Checksum;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.PureJavaCrc32;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class TeraChecksum extends Configured implements Tool {
static class ChecksumMapper
extends Mapper<Text, Text, NullWritable, Unsigned16> {
private Unsigned16 checksum = new Unsigned16();
private Unsigned16 sum = new Unsigned16();
private Checksum crc32 = new PureJavaCrc32();
public void map(Text key, Text value,
Context context) throws IOException {
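      // CRC32 over the key and value bytes of each record, accumulated into a
      // 128-bit running sum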
crc32.reset();
crc32.update(key.getBytes(), 0, key.getLength());
crc32.update(value.getBytes(), 0, value.getLength());
checksum.set(crc32.getValue());
sum.add(checksum);
}
public void cleanup(Context context)
throws IOException, InterruptedException {
context.write(NullWritable.get(), sum);
}
}
static class ChecksumReducer
extends Reducer<NullWritable, Unsigned16, NullWritable, Unsigned16> {
public void reduce(NullWritable key, Iterable<Unsigned16> values,
Context context) throws IOException, InterruptedException {
Unsigned16 sum = new Unsigned16();
for (Unsigned16 val : values) {
sum.add(val);
}
context.write(key, sum);
}
}
private static void usage() throws IOException {
System.err.println("terasum <out-dir> <report-dir>");
}
public int run(String[] args) throws Exception {
Job job = Job.getInstance(getConf());
if (args.length != 2) {
usage();
return 2;
}
TeraInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
job.setJobName("TeraSum");
job.setJarByClass(TeraChecksum.class);
job.setMapperClass(ChecksumMapper.class);
job.setReducerClass(ChecksumReducer.class);
job.setOutputKeyClass(NullWritable.class);
job.setOutputValueClass(Unsigned16.class);
// force a single reducer
job.setNumReduceTasks(1);
job.setInputFormatClass(TeraInputFormat.class);
return job.waitForCompletion(true) ? 0 : 1;
}
/**
* @param args
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new TeraChecksum(), args);
System.exit(res);
}
}
| 3,644 | 33.714286 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/TaskResult.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.pi;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.examples.pi.math.Summation;
import org.apache.hadoop.io.Writable;
/** A class for map task results or reduce task results. */
public class TaskResult implements Container<Summation>, Combinable<TaskResult>, Writable {
private Summation sigma;
private long duration;
public TaskResult() {}
TaskResult(Summation sigma, long duration) {
this.sigma = sigma;
this.duration = duration;
}
/** {@inheritDoc} */
@Override
public Summation getElement() {return sigma;}
/** @return The time duration used */
long getDuration() {return duration;}
/** {@inheritDoc} */
@Override
public int compareTo(TaskResult that) {
return this.sigma.compareTo(that.sigma);
}
/** {@inheritDoc} */
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
else if (obj != null && obj instanceof TaskResult) {
final TaskResult that = (TaskResult)obj;
return this.compareTo(that) == 0;
}
throw new IllegalArgumentException(obj == null? "obj == null":
"obj.getClass()=" + obj.getClass());
}
/** Not supported */
@Override
public int hashCode() {
throw new UnsupportedOperationException();
}
/** {@inheritDoc} */
@Override
public TaskResult combine(TaskResult that) {
final Summation s = sigma.combine(that.sigma);
return s == null? null: new TaskResult(s, this.duration + that.duration);
}
/** {@inheritDoc} */
@Override
public void readFields(DataInput in) throws IOException {
sigma = SummationWritable.read(in);
duration = in.readLong();
}
/** {@inheritDoc} */
@Override
public void write(DataOutput out) throws IOException {
SummationWritable.write(sigma, out);
out.writeLong(duration);
}
/** {@inheritDoc} */
@Override
public String toString() {
return "sigma=" + sigma + ", duration=" + duration + "(" + Util.millis2String(duration) + ")";
}
  /** Convert a String to a TaskResult */
public static TaskResult valueOf(String s) {
int i = 0;
int j = s.indexOf(", duration=");
if (j < 0)
throw new IllegalArgumentException("i=" + i + ", j=" + j + " < 0, s=" + s);
final Summation sigma = Summation.valueOf(Util.parseStringVariable("sigma", s.substring(i, j)));
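    // skip the ", " separator so i points at "duration=..."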
i = j + 2;
j = s.indexOf("(", i);
if (j < 0)
throw new IllegalArgumentException("i=" + i + ", j=" + j + " < 0, s=" + s);
final long duration = Util.parseLongVariable("duration", s.substring(i, j));
return new TaskResult(sigma, duration);
}
}
| 3,480 | 29.535088 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.pi;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.examples.pi.math.Summation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.ClusterMetrics;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* The main class for computing sums using map/reduce jobs.
* A sum is partitioned into jobs.
* A job may be executed on the map-side or on the reduce-side.
 * A map-side job has multiple maps and zero reducers.
 * A reduce-side job has one map and multiple reducers.
 * Depending on the cluster's status at runtime,
* a mix-type job may be executed on either side.
*/
public final class DistSum extends Configured implements Tool {
private static final Log LOG = LogFactory.getLog(DistSum.class);
private static final String NAME = DistSum.class.getSimpleName();
private static final String N_PARTS = "mapreduce.pi." + NAME + ".nParts";
/////////////////////////////////////////////////////////////////////////////
/** DistSum job parameters */
static class Parameters {
static final int COUNT = 6;
static final String LIST = "<nThreads> <nJobs> <type> <nPart> <remoteDir> <localDir>";
static final String DESCRIPTION =
"\n <nThreads> The number of working threads."
+ "\n <nJobs> The number of jobs per sum."
+ "\n <type> 'm' for map side job, 'r' for reduce side job, 'x' for mix type."
+ "\n <nPart> The number of parts per job."
+ "\n <remoteDir> Remote directory for submitting jobs."
+ "\n <localDir> Local directory for storing output files.";
/** Number of worker threads */
final int nThreads;
/** Number of jobs */
final int nJobs;
/** Number of parts per job */
final int nParts;
/** The machine used in the computation */
final Machine machine;
/** The remote job directory */
final String remoteDir;
/** The local output directory */
final File localDir;
private Parameters(Machine machine, int nThreads, int nJobs, int nParts,
String remoteDir, File localDir) {
this.machine = machine;
this.nThreads = nThreads;
this.nJobs = nJobs;
this.nParts = nParts;
this.remoteDir = remoteDir;
this.localDir = localDir;
}
/** {@inheritDoc} */
public String toString() {
return "\nnThreads = " + nThreads
+ "\nnJobs = " + nJobs
+ "\nnParts = " + nParts + " (" + machine + ")"
+ "\nremoteDir = " + remoteDir
+ "\nlocalDir = " + localDir;
}
/** Parse parameters */
static Parameters parse(String[] args, int i) {
if (args.length - i < COUNT)
throw new IllegalArgumentException("args.length - i < COUNT = "
+ COUNT + ", args.length="
+ args.length + ", i=" + i + ", args=" + Arrays.asList(args));
final int nThreads = Integer.parseInt(args[i++]);
final int nJobs = Integer.parseInt(args[i++]);
final String type = args[i++];
final int nParts = Integer.parseInt(args[i++]);
final String remoteDir = args[i++];
final File localDir = new File(args[i++]);
if (!"m".equals(type) && !"r".equals(type) && !"x".equals(type)) {
throw new IllegalArgumentException("type=" + type + " is not equal to m, r or x");
} else if (nParts <= 0) {
throw new IllegalArgumentException("nParts = " + nParts + " <= 0");
} else if (nJobs <= 0) {
throw new IllegalArgumentException("nJobs = " + nJobs + " <= 0");
} else if (nThreads <= 0) {
throw new IllegalArgumentException("nThreads = " + nThreads + " <= 0");
}
Util.checkDirectory(localDir);
return new Parameters("m".equals(type)? MapSide.INSTANCE
: "r".equals(type)? ReduceSide.INSTANCE: MixMachine.INSTANCE,
nThreads, nJobs, nParts, remoteDir, localDir);
}
}
/////////////////////////////////////////////////////////////////////////////
/** Abstract machine for job execution. */
public static abstract class Machine {
/** Initialize a job */
abstract void init(Job job) throws IOException;
/** {@inheritDoc} */
public String toString() {return getClass().getSimpleName();}
/** Compute sigma */
static void compute(Summation sigma,
TaskInputOutputContext<?, ?, NullWritable, TaskResult> context
) throws IOException, InterruptedException {
String s;
LOG.info(s = "sigma=" + sigma);
context.setStatus(s);
final long start = System.currentTimeMillis();
sigma.compute();
final long duration = System.currentTimeMillis() - start;
final TaskResult result = new TaskResult(sigma, duration);
LOG.info(s = "result=" + result);
context.setStatus(s);
context.write(NullWritable.get(), result);
}
/** Split for the summations */
public static final class SummationSplit extends InputSplit implements Writable, Container<Summation> {
private final static String[] EMPTY = {};
private Summation sigma;
public SummationSplit() {}
private SummationSplit(Summation sigma) {this.sigma = sigma;}
/** {@inheritDoc} */
@Override
public Summation getElement() {return sigma;}
/** {@inheritDoc} */
@Override
public long getLength() {return 1;}
/** {@inheritDoc} */
@Override
public String[] getLocations() {return EMPTY;}
/** {@inheritDoc} */
@Override
public void readFields(DataInput in) throws IOException {
sigma = SummationWritable.read(in);
}
/** {@inheritDoc} */
@Override
public void write(DataOutput out) throws IOException {
new SummationWritable(sigma).write(out);
}
}
/** An abstract InputFormat for the jobs */
public static abstract class AbstractInputFormat extends InputFormat<NullWritable, SummationWritable> {
/** Specify how to read the records */
@Override
public final RecordReader<NullWritable, SummationWritable> createRecordReader(
InputSplit generic, TaskAttemptContext context) {
final SummationSplit split = (SummationSplit)generic;
//return a record reader
return new RecordReader<NullWritable, SummationWritable>() {
boolean done = false;
/** {@inheritDoc} */
@Override
public void initialize(InputSplit split, TaskAttemptContext context) {}
/** {@inheritDoc} */
@Override
public boolean nextKeyValue() {return !done ? done = true : false;}
/** {@inheritDoc} */
@Override
public NullWritable getCurrentKey() {return NullWritable.get();}
/** {@inheritDoc} */
@Override
public SummationWritable getCurrentValue() {return new SummationWritable(split.getElement());}
/** {@inheritDoc} */
@Override
public float getProgress() {return done? 1f: 0f;}
/** {@inheritDoc} */
@Override
public void close() {}
};
}
}
}
/////////////////////////////////////////////////////////////////////////////
/**
* A machine which does computation on the map side.
*/
public static class MapSide extends Machine {
private static final MapSide INSTANCE = new MapSide();
/** {@inheritDoc} */
@Override
public void init(Job job) {
// setup mapper
job.setMapperClass(SummingMapper.class);
job.setMapOutputKeyClass(NullWritable.class);
job.setMapOutputValueClass(TaskResult.class);
// zero reducer
job.setNumReduceTasks(0);
// setup input
job.setInputFormatClass(PartitionInputFormat.class);
}
/** An InputFormat which partitions a summation */
public static class PartitionInputFormat extends AbstractInputFormat {
    /** Partitions the summation into parts and then returns them as splits */
@Override
public List<InputSplit> getSplits(JobContext context) {
//read sigma from conf
final Configuration conf = context.getConfiguration();
final Summation sigma = SummationWritable.read(DistSum.class, conf);
final int nParts = conf.getInt(N_PARTS, 0);
//create splits
final List<InputSplit> splits = new ArrayList<InputSplit>(nParts);
final Summation[] parts = sigma.partition(nParts);
for(int i = 0; i < parts.length; ++i) {
splits.add(new SummationSplit(parts[i]));
//LOG.info("parts[" + i + "] = " + parts[i]);
}
return splits;
}
}
/** A mapper which computes sums */
public static class SummingMapper extends
Mapper<NullWritable, SummationWritable, NullWritable, TaskResult> {
@Override
protected void map(NullWritable nw, SummationWritable sigma, final Context context
) throws IOException, InterruptedException {
compute(sigma.getElement(), context);
}
}
}
/////////////////////////////////////////////////////////////////////////////
/**
* A machine which does computation on the reduce side.
*/
public static class ReduceSide extends Machine {
private static final ReduceSide INSTANCE = new ReduceSide();
/** {@inheritDoc} */
@Override
public void init(Job job) {
// setup mapper
job.setMapperClass(PartitionMapper.class);
job.setMapOutputKeyClass(IntWritable.class);
job.setMapOutputValueClass(SummationWritable.class);
// setup partitioner
job.setPartitionerClass(IndexPartitioner.class);
// setup reducer
job.setReducerClass(SummingReducer.class);
job.setOutputKeyClass(NullWritable.class);
job.setOutputValueClass(TaskResult.class);
final Configuration conf = job.getConfiguration();
final int nParts = conf.getInt(N_PARTS, 1);
job.setNumReduceTasks(nParts);
// setup input
job.setInputFormatClass(SummationInputFormat.class);
}
/** An InputFormat which returns a single summation. */
public static class SummationInputFormat extends AbstractInputFormat {
/** @return a list containing a single split of summation */
@Override
public List<InputSplit> getSplits(JobContext context) {
//read sigma from conf
final Configuration conf = context.getConfiguration();
final Summation sigma = SummationWritable.read(DistSum.class, conf);
//create splits
final List<InputSplit> splits = new ArrayList<InputSplit>(1);
splits.add(new SummationSplit(sigma));
return splits;
}
}
/** A Mapper which partitions a summation */
public static class PartitionMapper extends
Mapper<NullWritable, SummationWritable, IntWritable, SummationWritable> {
/** Partitions sigma into parts */
@Override
protected void map(NullWritable nw, SummationWritable sigma, final Context context
) throws IOException, InterruptedException {
final Configuration conf = context.getConfiguration();
final int nParts = conf.getInt(N_PARTS, 0);
final Summation[] parts = sigma.getElement().partition(nParts);
for(int i = 0; i < parts.length; ++i) {
context.write(new IntWritable(i), new SummationWritable(parts[i]));
LOG.info("parts[" + i + "] = " + parts[i]);
}
}
}
/** Use the index for partitioning. */
public static class IndexPartitioner extends Partitioner<IntWritable, SummationWritable> {
/** Return the index as the partition. */
@Override
public int getPartition(IntWritable index, SummationWritable value, int numPartitions) {
return index.get();
}
}
/** A Reducer which computes sums */
public static class SummingReducer extends
Reducer<IntWritable, SummationWritable, NullWritable, TaskResult> {
@Override
protected void reduce(IntWritable index, Iterable<SummationWritable> sums,
Context context) throws IOException, InterruptedException {
LOG.info("index=" + index);
for(SummationWritable sigma : sums)
compute(sigma.getElement(), context);
}
}
}
/////////////////////////////////////////////////////////////////////////////
/**
 * A machine which chooses a Machine at runtime according to the cluster status
*/
public static class MixMachine extends Machine {
private static final MixMachine INSTANCE = new MixMachine();
private Cluster cluster;
/** {@inheritDoc} */
@Override
public synchronized void init(Job job) throws IOException {
final Configuration conf = job.getConfiguration();
if (cluster == null) {
String jobTrackerStr = conf.get("mapreduce.jobtracker.address", "localhost:8012");
cluster = new Cluster(NetUtils.createSocketAddr(jobTrackerStr), conf);
}
chooseMachine(conf).init(job);
}
/**
* Choose a Machine in runtime according to the cluster status.
*/
private Machine chooseMachine(Configuration conf) throws IOException {
final int parts = conf.getInt(N_PARTS, Integer.MAX_VALUE);
try {
for(;; Thread.sleep(2000)) {
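          // poll the cluster every 2 seconds until the map or reduce side has
          // enough free slots to run all parts at once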
//get cluster status
final ClusterMetrics status = cluster.getClusterStatus();
final int m =
status.getMapSlotCapacity() - status.getOccupiedMapSlots();
final int r =
status.getReduceSlotCapacity() - status.getOccupiedReduceSlots();
if (m >= parts || r >= parts) {
//favor ReduceSide machine
final Machine value = r >= parts?
ReduceSide.INSTANCE: MapSide.INSTANCE;
Util.out.println(" " + this + " is " + value + " (m=" + m + ", r=" + r + ")");
return value;
}
}
} catch (InterruptedException e) {
throw new IOException(e);
}
}
}
/////////////////////////////////////////////////////////////////////////////
private final Util.Timer timer = new Util.Timer(true);
private Parameters parameters;
/** Get Parameters */
Parameters getParameters() {return parameters;}
/** Set Parameters */
void setParameters(Parameters p) {parameters = p;}
/** Create a job */
private Job createJob(String name, Summation sigma) throws IOException {
final Job job = Job.getInstance(getConf(), parameters.remoteDir + "/" +
name);
final Configuration jobconf = job.getConfiguration();
job.setJarByClass(DistSum.class);
jobconf.setInt(N_PARTS, parameters.nParts);
SummationWritable.write(sigma, DistSum.class, jobconf);
// disable task timeout
jobconf.setLong(MRJobConfig.TASK_TIMEOUT, 0);
// do not use speculative execution
jobconf.setBoolean(MRJobConfig.MAP_SPECULATIVE, false);
jobconf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, false);
return job;
}
/** Start a job to compute sigma */
private void compute(final String name, Summation sigma) throws IOException {
if (sigma.getValue() != null)
throw new IOException("sigma.getValue() != null, sigma=" + sigma);
//setup remote directory
final FileSystem fs = FileSystem.get(getConf());
final Path dir = fs.makeQualified(new Path(parameters.remoteDir, name));
if (!Util.createNonexistingDirectory(fs, dir))
return;
//setup a job
final Job job = createJob(name, sigma);
final Path outdir = new Path(dir, "out");
FileOutputFormat.setOutputPath(job, outdir);
//start a map/reduce job
final String startmessage = "steps/parts = "
+ sigma.E.getSteps() + "/" + parameters.nParts
+ " = " + Util.long2string(sigma.E.getSteps()/parameters.nParts);
Util.runJob(name, job, parameters.machine, startmessage, timer);
final List<TaskResult> results = Util.readJobOutputs(fs, outdir);
Util.writeResults(name, results, fs, parameters.remoteDir);
fs.delete(dir, true);
//combine results
final List<TaskResult> combined = Util.combine(results);
final PrintWriter out = Util.createWriter(parameters.localDir, name);
try {
for(TaskResult r : combined) {
final String s = taskResult2string(name, r);
out.println(s);
out.flush();
Util.out.println(s);
}
} finally {
out.close();
}
if (combined.size() == 1) {
final Summation s = combined.get(0).getElement();
if (sigma.contains(s) && s.contains(sigma))
sigma.setValue(s.getValue());
}
}
/** Convert a TaskResult to a String */
public static String taskResult2string(String name, TaskResult result) {
return NAME + " " + name + "> " + result;
}
/** Convert a String to a (String, TaskResult) pair */
public static Map.Entry<String, TaskResult> string2TaskResult(final String s) {
// LOG.info("line = " + line);
int j = s.indexOf(NAME);
if (j == 0) {
int i = j + NAME.length() + 1;
j = s.indexOf("> ", i);
final String key = s.substring(i, j);
final TaskResult value = TaskResult.valueOf(s.substring(j + 2));
return new Map.Entry<String, TaskResult>(){
@Override
public String getKey() {return key;}
@Override
public TaskResult getValue() {return value;}
@Override
public TaskResult setValue(TaskResult value) {
throw new UnsupportedOperationException();
}
};
}
return null;
}
/** Callable computation */
class Computation implements Callable<Computation> {
private final int index;
private final String name;
private final Summation sigma;
Computation(int index, String name, Summation sigma) {
this.index = index;
this.name = name;
this.sigma = sigma;
}
/** @return The job name */
String getJobName() {return String.format("%s.job%03d", name, index);}
/** {@inheritDoc} */
@Override
public String toString() {return getJobName() + sigma;}
/** Start the computation */
@Override
public Computation call() {
if (sigma.getValue() == null)
try {
compute(getJobName(), sigma);
} catch(Exception e) {
Util.out.println("ERROR: Got an exception from " + getJobName());
e.printStackTrace(Util.out);
}
return this;
}
}
/** Partition sigma and execute the computations. */
private Summation execute(String name, Summation sigma) {
final Summation[] summations = sigma.partition(parameters.nJobs);
final List<Computation> computations = new ArrayList<Computation>();
for(int i = 0; i < summations.length; i++)
computations.add(new Computation(i, name, summations[i]));
try {
Util.execute(parameters.nThreads, computations);
} catch (Exception e) {
throw new RuntimeException(e);
}
final List<Summation> combined = Util.combine(Arrays.asList(summations));
return combined.size() == 1? combined.get(0): null;
}
/** {@inheritDoc} */
@Override
public int run(String[] args) throws Exception {
//parse arguments
if (args.length != Parameters.COUNT + 2)
return Util.printUsage(args, getClass().getName()
+ " <name> <sigma> " + Parameters.LIST
+ "\n <name> The name."
+ "\n <sigma> The summation."
+ Parameters.DESCRIPTION);
int i = 0;
final String name = args[i++];
final Summation sigma = Summation.valueOf(args[i++]);
setParameters(DistSum.Parameters.parse(args, i));
Util.out.println();
Util.out.println("name = " + name);
Util.out.println("sigma = " + sigma);
Util.out.println(parameters);
Util.out.println();
//run jobs
final Summation result = execute(name, sigma);
if (result.equals(sigma)) {
sigma.setValue(result.getValue());
timer.tick("\n\nDONE\n\nsigma=" + sigma);
return 0;
} else {
timer.tick("\n\nDONE WITH ERROR\n\nresult=" + result);
return 1;
}
}
/** main */
public static void main(String[] args) throws Exception {
System.exit(ToolRunner.run(null, new DistSum(), args));
}
}
| 22,278 | 35.403595 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Combinable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.pi;
/**
* A class is Combinable if its object can be combined with other objects.
* @param <T> The generic type
*/
public interface Combinable<T> extends Comparable<T> {
/**
* Combine this with that.
* @param that Another object.
* @return The combined object.
*/
public T combine(T that);
}
| 1,155 | 36.290323 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/SummationWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.pi;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.examples.pi.math.ArithmeticProgression;
import org.apache.hadoop.examples.pi.math.Summation;
import org.apache.hadoop.io.WritableComparable;
/** A Writable class for Summation */
public final class SummationWritable implements WritableComparable<SummationWritable>, Container<Summation> {
private Summation sigma;
public SummationWritable() {}
SummationWritable(Summation sigma) {this.sigma = sigma;}
/** {@inheritDoc} */
@Override
public String toString() {return getClass().getSimpleName() + sigma;}
/** {@inheritDoc} */
@Override
public Summation getElement() {return sigma;}
/** Read sigma from conf */
public static Summation read(Class<?> clazz, Configuration conf) {
return Summation.valueOf(conf.get(clazz.getSimpleName() + ".sigma"));
}
/** Write sigma to conf */
public static void write(Summation sigma, Class<?> clazz, Configuration conf) {
conf.set(clazz.getSimpleName() + ".sigma", sigma.toString());
}
/** Read Summation from DataInput */
static Summation read(DataInput in) throws IOException {
final SummationWritable s = new SummationWritable();
s.readFields(in);
return s.getElement();
}
/** {@inheritDoc} */
@Override
public void readFields(DataInput in) throws IOException {
final ArithmeticProgression N = ArithmeticProgressionWritable.read(in);
final ArithmeticProgression E = ArithmeticProgressionWritable.read(in);
sigma = new Summation(N, E);
if (in.readBoolean()) {
sigma.setValue(in.readDouble());
}
}
/** Write sigma to DataOutput */
public static void write(Summation sigma, DataOutput out) throws IOException {
ArithmeticProgressionWritable.write(sigma.N, out);
ArithmeticProgressionWritable.write(sigma.E, out);
final Double v = sigma.getValue();
if (v == null)
out.writeBoolean(false);
else {
out.writeBoolean(true);
out.writeDouble(v);
}
}
/** {@inheritDoc} */
@Override
public void write(DataOutput out) throws IOException {
write(sigma, out);
}
/** {@inheritDoc} */
@Override
public int compareTo(SummationWritable that) {
return this.sigma.compareTo(that.sigma);
}
/** {@inheritDoc} */
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
else if (obj != null && obj instanceof SummationWritable) {
final SummationWritable that = (SummationWritable)obj;
return this.compareTo(that) == 0;
}
throw new IllegalArgumentException(obj == null? "obj == null":
"obj.getClass()=" + obj.getClass());
}
/** Not supported */
@Override
public int hashCode() {
throw new UnsupportedOperationException();
}
/** A writable class for ArithmeticProgression */
private static class ArithmeticProgressionWritable {
/** Read ArithmeticProgression from DataInput */
private static ArithmeticProgression read(DataInput in) throws IOException {
return new ArithmeticProgression(in.readChar(),
in.readLong(), in.readLong(), in.readLong());
}
/** Write ArithmeticProgression to DataOutput */
private static void write(ArithmeticProgression ap, DataOutput out
) throws IOException {
out.writeChar(ap.symbol);
out.writeLong(ap.value);
out.writeLong(ap.delta);
out.writeLong(ap.limit);
}
}
}
| 4,343 | 30.941176 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistBbp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.pi;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.examples.pi.DistSum.Computation;
import org.apache.hadoop.examples.pi.DistSum.Parameters;
import org.apache.hadoop.examples.pi.math.Bellard;
import org.apache.hadoop.examples.pi.math.Summation;
import org.apache.hadoop.examples.pi.math.Bellard.Parameter;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* A map/reduce program that uses a BBP-type method to compute exact
* binary digits of Pi.
 * This program is designed for computing the n-th bit of Pi,
* for large n, say n >= 10^8.
* For computing lower bits of Pi, consider using bbp.
*
 * The actual computation is done by DistSum jobs.
* The steps for launching the jobs are:
*
* (1) Initialize parameters.
* (2) Create a list of sums.
* (3) Read computed values from the given local directory.
* (4) Remove the computed values from the sums.
* (5) Partition the remaining sums into computation jobs.
* (6) Submit the computation jobs to a cluster and then wait for the results.
* (7) Write job outputs to the given local directory.
* (8) Combine the job outputs and print the Pi bits.
*/
/*
* The command line format is:
* > hadoop org.apache.hadoop.examples.pi.DistBbp \
* <b> <nThreads> <nJobs> <type> <nPart> <remoteDir> <localDir>
*
* And the parameters are:
* <b> The number of bits to skip, i.e. compute the (b+1)th position.
* <nThreads> The number of working threads.
* <nJobs> The number of jobs per sum.
* <type> 'm' for map side job, 'r' for reduce side job, 'x' for mix type.
* <nPart> The number of parts per job.
* <remoteDir> Remote directory for submitting jobs.
* <localDir> Local directory for storing output files.
*
* Note that it may take a long time to finish all the jobs when <b> is large.
* If the program is killed in the middle of the execution, the same command with
* a different <remoteDir> can be used to resume the execution. For example, suppose
* we use the following command to compute the (10^15+57)th bit of Pi.
*
* > hadoop org.apache.hadoop.examples.pi.DistBbp \
* 1,000,000,000,000,056 20 1000 x 500 remote/a local/output
*
 * It uses 20 threads to submit jobs so that there are at most 20 concurrent jobs.
 * Each sum (there are 14 sums in total) is partitioned into 1000 jobs.
* The jobs will be executed in map-side or reduce-side. Each job has 500 parts.
* The remote directory for the jobs is remote/a and the local directory
 * for storing output is local/output. Depending on the cluster configuration,
* it may take many days to finish the entire execution. If the execution is killed,
* we may resume it by
*
* > hadoop org.apache.hadoop.examples.pi.DistBbp \
* 1,000,000,000,000,056 20 1000 x 500 remote/b local/output
*/
public final class DistBbp extends Configured implements Tool {
public static final String DESCRIPTION
= "A map/reduce program that uses a BBP-type formula to compute exact bits of Pi.";
private final Util.Timer timer = new Util.Timer(true);
/** {@inheritDoc} */
public int run(String[] args) throws Exception {
//parse arguments
if (args.length != DistSum.Parameters.COUNT + 1)
return Util.printUsage(args,
getClass().getName() + " <b> " + Parameters.LIST
+ "\n <b> The number of bits to skip, i.e. compute the (b+1)th position."
+ Parameters.DESCRIPTION);
int i = 0;
final long b = Util.string2long(args[i++]);
final DistSum.Parameters parameters = DistSum.Parameters.parse(args, i);
if (b < 0)
throw new IllegalArgumentException("b = " + b + " < 0");
Util.printBitSkipped(b);
Util.out.println(parameters);
Util.out.println();
//initialize sums
final DistSum distsum = new DistSum();
distsum.setConf(getConf());
distsum.setParameters(parameters);
final boolean isVerbose = getConf().getBoolean(Parser.VERBOSE_PROPERTY, false);
final Map<Parameter, List<TaskResult>> existings = new Parser(isVerbose).parse(parameters.localDir.getPath(), null);
Parser.combine(existings);
for(List<TaskResult> tr : existings.values())
Collections.sort(tr);
Util.out.println();
final Map<Bellard.Parameter, Bellard.Sum> sums = Bellard.getSums(b, parameters.nJobs, existings);
Util.out.println();
//execute the computations
execute(distsum, sums);
//compute Pi from the sums
final double pi = Bellard.computePi(b, sums);
Util.printBitSkipped(b);
Util.out.println(Util.pi2string(pi, Bellard.bit2terms(b)));
return 0;
}
/** Execute DistSum computations */
private void execute(DistSum distsum,
final Map<Bellard.Parameter, Bellard.Sum> sums) throws Exception {
final List<Computation> computations = new ArrayList<Computation>();
int i = 0;
for(Bellard.Parameter p : Bellard.Parameter.values())
for(Summation s : sums.get(p))
if (s.getValue() == null)
computations.add(distsum.new Computation(i++, p.toString(), s));
if (computations.isEmpty())
Util.out.println("No computation");
else {
timer.tick("execute " + computations.size() + " computation(s)");
Util.execute(distsum.getParameters().nThreads, computations);
timer.tick("done");
}
}
/** main */
public static void main(String[] args) throws Exception {
System.exit(ToolRunner.run(null, new DistBbp(), args));
}
}
| 6,426 | 39.677215 | 120 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.pi;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.examples.pi.math.Bellard;
import org.apache.hadoop.examples.pi.math.Bellard.Parameter;
import com.google.common.base.Charsets;
/** A class for parsing outputs */
public final class Parser {
static final String VERBOSE_PROPERTY = "pi.parser.verbose";
final boolean isVerbose;
public Parser(boolean isVerbose) {
this.isVerbose = isVerbose;
}
private void println(String s) {
if (isVerbose)
Util.out.println(s);
}
/** Parse a line */
private static void parseLine(final String line, Map<Parameter, List<TaskResult>> m) {
// LOG.info("line = " + line);
final Map.Entry<String, TaskResult> e = DistSum.string2TaskResult(line);
if (e != null) {
final List<TaskResult> sums = m.get(Parameter.get(e.getKey()));
if (sums == null)
throw new IllegalArgumentException("sums == null, line=" + line + ", e=" + e);
sums.add(e.getValue());
}
}
/** Parse a file or a directory tree */
private void parse(File f, Map<Parameter, List<TaskResult>> sums) throws IOException {
if (f.isDirectory()) {
println("Process directory " + f);
for(File child : f.listFiles())
parse(child, sums);
} else if (f.getName().endsWith(".txt")) {
println("Parse file " + f);
final Map<Parameter, List<TaskResult>> m = new TreeMap<Parameter, List<TaskResult>>();
for(Parameter p : Parameter.values())
m.put(p, new ArrayList<TaskResult>());
final BufferedReader in = new BufferedReader(
new InputStreamReader(new FileInputStream(f), Charsets.UTF_8));
try {
for(String line; (line = in.readLine()) != null; )
try {
parseLine(line, m);
} catch(RuntimeException e) {
Util.err.println("line = " + line);
throw e;
}
} finally {
in.close();
}
for(Parameter p : Parameter.values()) {
final List<TaskResult> combined = Util.combine(m.get(p));
if (!combined.isEmpty()) {
println(p + " (size=" + combined.size() + "):");
for(TaskResult r : combined)
println(" " + r);
}
sums.get(p).addAll(m.get(p));
}
}
}
/** Parse a path */
private Map<Parameter, List<TaskResult>> parse(String f) throws IOException {
final Map<Parameter, List<TaskResult>> m = new TreeMap<Parameter, List<TaskResult>>();
for(Parameter p : Parameter.values())
m.put(p, new ArrayList<TaskResult>());
parse(new File(f), m);
//LOG.info("m=" + m.toString().replace(", ", ",\n "));
for(Parameter p : Parameter.values())
m.put(p, m.get(p));
return m;
}
/** Parse input and re-write results. */
Map<Parameter, List<TaskResult>> parse(String inputpath, String outputdir
) throws IOException {
//parse input
Util.out.print("\nParsing " + inputpath + " ... ");
Util.out.flush();
final Map<Parameter, List<TaskResult>> parsed = parse(inputpath);
Util.out.println("DONE");
//re-write the results
if (outputdir != null) {
Util.out.print("\nWriting to " + outputdir + " ...");
Util.out.flush();
for(Parameter p : Parameter.values()) {
final List<TaskResult> results = parsed.get(p);
Collections.sort(results);
final PrintWriter out = new PrintWriter(
new OutputStreamWriter(new FileOutputStream(
new File(outputdir, p + ".txt")), Charsets.UTF_8), true);
try {
for(int i = 0; i < results.size(); i++)
out.println(DistSum.taskResult2string(p + "." + i, results.get(i)));
}
finally {
out.close();
}
}
Util.out.println("DONE");
}
return parsed;
}
/** Combine results */
static <T extends Combinable<T>> Map<Parameter, T> combine(Map<Parameter, List<T>> m) {
final Map<Parameter, T> combined = new TreeMap<Parameter, T>();
for(Parameter p : Parameter.values()) {
//note: results would never be null due to the design of Util.combine
final List<T> results = Util.combine(m.get(p));
Util.out.format("%-6s => ", p);
if (results.size() != 1)
Util.out.println(results.toString().replace(", ", ",\n "));
else {
final T r = results.get(0);
combined.put(p, r);
Util.out.println(r);
}
}
return combined;
}
/** main */
public static void main(String[] args) throws IOException {
if (args.length < 2 || args.length > 3)
Util.printUsage(args, Parser.class.getName()
+ " <b> <inputpath> [<outputdir>]");
int i = 0;
final long b = Util.string2long(args[i++]);
final String inputpath = args[i++];
final String outputdir = args.length >= 3? args[i++]: null;
//read input
final Map<Parameter, List<TaskResult>> parsed = new Parser(true).parse(inputpath, outputdir);
final Map<Parameter, TaskResult> combined = combine(parsed);
long duration = 0;
for(TaskResult r : combined.values())
duration += r.getDuration();
//print pi
final double pi = Bellard.computePi(b, combined);
Util.printBitSkipped(b);
Util.out.println(Util.pi2string(pi, Bellard.bit2terms(b)));
Util.out.println("cpu time = " + Util.millis2String(duration));
}
}
| 6,540 | 32.891192 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Container.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.pi;
/**
* A class is a Container if it contains an element.
* @param <T> The generic type
*/
public interface Container<T> {
/**
* @return The contained element.
*/
public T getElement();
}
| 1,047 | 35.137931 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Util.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.pi;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.Semaphore;
import org.apache.hadoop.examples.pi.DistSum.Machine;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Charsets;
/** Utility methods */
public class Util {
/** Output stream */
public static final PrintStream out = System.out;
/** Error stream */
public static final PrintStream err = System.out;
/** Timer */
public static class Timer {
private final boolean isAccumulative;
private final long start = System.currentTimeMillis();
private long previous = start;
    /** Timer constructor.
     * @param isAccumulative If true, each tick reports the time since the timer
     *                       started; otherwise the time since the previous tick.
     */
public Timer(boolean isAccumulative) {
this.isAccumulative = isAccumulative;
final StackTraceElement[] stack = Thread.currentThread().getStackTrace();
final StackTraceElement e = stack[stack.length - 1];
out.println(e + " started at " + new Date(start));
}
/** Same as tick(null). */
public long tick() {return tick(null);}
/**
* Tick
* @param s Output message. No output if it is null.
* @return delta
*/
public synchronized long tick(String s) {
final long t = System.currentTimeMillis();
final long delta = t - (isAccumulative? start: previous);
if (s != null) {
out.format("%15dms (=%-15s: %s%n", delta, millis2String(delta) + ")", s);
out.flush();
}
previous = t;
return delta;
}
}
  /** Convert milliseconds to a String. */
public static String millis2String(long n) {
if (n < 0)
return "-" + millis2String(-n);
else if (n < 1000)
return n + "ms";
final StringBuilder b = new StringBuilder();
final int millis = (int)(n % 1000L);
if (millis != 0)
b.append(String.format(".%03d", millis));
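    // Build the result from the least significant field upward, prepending
    // seconds, minutes, hours, days and years as needed,
    // e.g. millis2String(90061000) returns "1 day 1:01:01".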
if ((n /= 1000) < 60)
return b.insert(0, n).append("s").toString();
b.insert(0, String.format(":%02d", (int)(n % 60L)));
if ((n /= 60) < 60)
return b.insert(0, n).toString();
b.insert(0, String.format(":%02d", (int)(n % 60L)));
if ((n /= 60) < 24)
return b.insert(0, n).toString();
b.insert(0, n % 24L);
final int days = (int)((n /= 24) % 365L);
b.insert(0, days == 1? " day ": " days ").insert(0, days);
if ((n /= 365L) > 0)
b.insert(0, n == 1? " year ": " years ").insert(0, n);
return b.toString();
}
  /** Convert a String to a long.
   * Comma-separated number format (e.g. "1,000,000") is supported.
   */
public static long string2long(String s) {
return Long.parseLong(s.trim().replace(",", ""));
}
  /** Convert a long to a String in comma-separated number format. */
public static String long2string(long n) {
if (n < 0)
return "-" + long2string(-n);
final StringBuilder b = new StringBuilder();
for(; n >= 1000; n = n/1000)
b.insert(0, String.format(",%03d", n % 1000));
return n + b.toString();
}
/** Parse a variable. */
public static long parseLongVariable(final String name, final String s) {
return string2long(parseStringVariable(name, s));
}
/** Parse a variable. */
public static String parseStringVariable(final String name, final String s) {
if (!s.startsWith(name + '='))
throw new IllegalArgumentException("!s.startsWith(name + '='), name="
+ name + ", s=" + s);
return s.substring(name.length() + 1);
}
/** Execute the callables by a number of threads */
public static <T, E extends Callable<T>> void execute(int nThreads, List<E> callables
) throws InterruptedException, ExecutionException {
final ExecutorService executor = Executors.newFixedThreadPool(nThreads);
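    // invokeAll blocks until every callable has completed; get() then rethrows
    // any task failure wrapped in an ExecutionException.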
final List<Future<T>> futures = executor.invokeAll(callables);
for(Future<T> f : futures)
f.get();
}
/** Print usage messages */
public static int printUsage(String[] args, String usage) {
err.println("args = " + Arrays.asList(args));
err.println();
err.println("Usage: java " + usage);
err.println();
ToolRunner.printGenericCommandUsage(err);
return -1;
}
/** Combine a list of items. */
public static <T extends Combinable<T>> List<T> combine(Collection<T> items) {
final List<T> sorted = new ArrayList<T>(items);
if (sorted.size() <= 1)
return sorted;
Collections.sort(sorted);
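    // Walk the sorted list, merging each item into the running "prev" while
    // combine() succeeds; a null result closes the group and starts a new one.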
final List<T> combined = new ArrayList<T>(items.size());
T prev = sorted.get(0);
for(int i = 1; i < sorted.size(); i++) {
final T curr = sorted.get(i);
final T c = curr.combine(prev);
if (c != null)
prev = c;
else {
combined.add(prev);
prev = curr;
}
}
combined.add(prev);
return combined;
}
/** Check local directory. */
public static void checkDirectory(File dir) {
if (!dir.exists())
if (!dir.mkdirs())
throw new IllegalArgumentException("!dir.mkdirs(), dir=" + dir);
if (!dir.isDirectory())
throw new IllegalArgumentException("dir (=" + dir + ") is not a directory.");
}
  /** Create a writer for a new timestamped local file. */
public static PrintWriter createWriter(File dir, String prefix) throws IOException {
checkDirectory(dir);
SimpleDateFormat dateFormat = new SimpleDateFormat("-yyyyMMdd-HHmmssSSS");
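    // Try timestamped file names until one is found that does not already exist.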
for(;;) {
final File f = new File(dir,
prefix + dateFormat.format(new Date(System.currentTimeMillis())) + ".txt");
if (!f.exists())
return new PrintWriter(new OutputStreamWriter(new FileOutputStream(f), Charsets.UTF_8));
try {Thread.sleep(10);} catch (InterruptedException e) {}
}
}
/** Print a "bits skipped" message. */
public static void printBitSkipped(final long b) {
out.println();
out.println("b = " + long2string(b)
+ " (" + (b < 2? "bit": "bits") + " skipped)");
}
/** Convert a pi value to a String. */
public static String pi2string(final double pi, final long terms) {
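    // Keep DOUBLE_PRECISION fractional bits, then print the leading (accurate)
    // hex digits separately from the remaining, doubtful ones.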
final long value = (long)(pi * (1L << DOUBLE_PRECISION));
final int acc_bit = accuracy(terms, false);
final int acc_hex = acc_bit/4;
final int shift = DOUBLE_PRECISION - acc_bit;
return String.format("%0" + acc_hex + "X %0" + (13-acc_hex) + "X (%d hex digits)",
value >> shift, value & ((1 << shift) - 1), acc_hex);
}
static final int DOUBLE_PRECISION = 52; //mantissa size
static final int MACHEPS_EXPONENT = DOUBLE_PRECISION + 1;
  /** Estimate the number of accurate bits for the given number of terms. */
public static int accuracy(final long terms, boolean print) {
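    // Assume about log2(terms)/2 mantissa bits are lost to accumulated rounding
    // error, and round the accurate bit count down to a whole number of hex digits.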
final double error = terms <= 0? 2: (Math.log(terms) / Math.log(2)) / 2;
final int bits = MACHEPS_EXPONENT - (int)Math.ceil(error);
if (print)
out.println("accuracy: bits=" + bits + ", terms=" + long2string(terms) + ", error exponent=" + error);
return bits - bits%4;
}
private static final String JOB_SEPARATION_PROPERTY = "pi.job.separation.seconds";
private static final Semaphore JOB_SEMAPHORE = new Semaphore(1);
/** Run a job. */
static void runJob(String name, Job job, Machine machine, String startmessage, Util.Timer timer) {
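    // The semaphore serializes submissions so that consecutive jobs start at
    // least pi.job.separation.seconds (default 10s) apart.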
JOB_SEMAPHORE.acquireUninterruptibly();
Long starttime = null;
try {
try {
starttime = timer.tick("starting " + name + " ...\n " + startmessage);
//initialize and submit a job
machine.init(job);
job.submit();
// Separate jobs
final long sleeptime = 1000L * job.getConfiguration().getInt(JOB_SEPARATION_PROPERTY, 10);
if (sleeptime > 0) {
Util.out.println(name + "> sleep(" + Util.millis2String(sleeptime) + ")");
Thread.sleep(sleeptime);
}
} finally {
JOB_SEMAPHORE.release();
}
if (!job.waitForCompletion(false))
throw new RuntimeException(name + " failed.");
} catch(Exception e) {
throw e instanceof RuntimeException? (RuntimeException)e: new RuntimeException(e);
} finally {
if (starttime != null)
timer.tick(name + "> timetaken=" + Util.millis2String(timer.tick() - starttime));
}
}
/** Read job outputs */
static List<TaskResult> readJobOutputs(FileSystem fs, Path outdir) throws IOException {
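    // Collect one TaskResult per line from every part-* file in the output directory.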
final List<TaskResult> results = new ArrayList<TaskResult>();
for(FileStatus status : fs.listStatus(outdir)) {
if (status.getPath().getName().startsWith("part-")) {
final BufferedReader in = new BufferedReader(
new InputStreamReader(fs.open(status.getPath()), Charsets.UTF_8));
try {
for(String line; (line = in.readLine()) != null; )
results.add(TaskResult.valueOf(line));
}
finally {
in.close();
}
}
}
if (results.isEmpty())
throw new IOException("Output not found");
return results;
}
/** Write results */
static void writeResults(String name, List<TaskResult> results, FileSystem fs, String dir) throws IOException {
final Path outfile = new Path(dir, name + ".txt");
Util.out.println(name + "> writing results to " + outfile);
final PrintWriter out = new PrintWriter(new OutputStreamWriter(fs.create(outfile), Charsets.UTF_8), true);
try {
for(TaskResult r : results)
out.println(r);
}
finally {
out.close();
}
}
/** Create a directory. */
static boolean createNonexistingDirectory(FileSystem fs, Path dir) throws IOException {
if (fs.exists(dir)) {
Util.err.println("dir (= " + dir + ") already exists.");
return false;
} else if (!fs.mkdirs(dir)) {
throw new IOException("Cannot create working directory " + dir);
}
fs.setPermission(dir, new FsPermission((short)0777));
return true;
}
}
| 11,343 | 33.066066 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Montgomery.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.examples.pi.math;
/** Montgomery method.
*
* References:
*
* [1] Richard Crandall and Carl Pomerance. Prime Numbers: A Computational
* Perspective. Springer-Verlag, 2001.
*
* [2] Peter Montgomery. Modular multiplication without trial division.
* Math. Comp., 44:519-521, 1985.
*/
class Montgomery {
protected final Product product = new Product();
protected long N;
protected long N_I; // N'
protected long R;
protected long R_1; // R - 1
protected int s;
  /** Set the modulus N and initialize the derived constants R, N', R-1 and s. */
Montgomery set(long n) {
if (n % 2 != 1)
throw new IllegalArgumentException("n % 2 != 1, n=" + n);
N = n;
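    // R is the smallest power of two greater than N, N' satisfies N*N' = -1 (mod R),
    // and s = log2(R) so that division by R can be done with a right shift.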
R = Long.highestOneBit(n) << 1;
N_I = R - Modular.modInverse(N, R);
R_1 = R - 1;
s = Long.numberOfTrailingZeros(R);
return this;
}
/** Compute 2^y mod N for N odd. */
long mod(final long y) {
long p = R - N;
long x = p << 1;
if (x >= N) x -= N;
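    // p = R mod N is 1 in Montgomery form and x = 2R mod N is 2 in Montgomery form;
    // square-and-multiply over the bits of y, then m(p, 1) strips the extra factor of R.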
for(long mask = Long.highestOneBit(y); mask > 0; mask >>>= 1) {
p = product.m(p, p);
if ((mask & y) != 0) p = product.m(p, x);
}
return product.m(p, 1);
}
class Product {
private final LongLong x = new LongLong();
private final LongLong xN_I = new LongLong();
private final LongLong aN = new LongLong();
long m(final long c, final long d) {
LongLong.multiplication(x, c, d);
// a = (x * N')&(R - 1) = ((x & R_1) * N') & R_1
final long a = LongLong.multiplication(xN_I, x.and(R_1), N_I).and(R_1);
LongLong.multiplication(aN, a, N);
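      // x + a*N is divisible by R, so shifting right by s performs the exact division by R.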
final long z = aN.plusEqual(x).shiftRight(s);
return z < N? z: z - N;
}
}
}
| 2,496 | 31.012821 | 77 |
java
|