repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesRecordOutput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.typedbytes;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.TreeMap;
import org.apache.hadoop.record.Buffer;
import org.apache.hadoop.record.Record;
import org.apache.hadoop.record.RecordOutput;
/**
* Serializer for records that writes typed bytes.
*/
public class TypedBytesRecordOutput implements RecordOutput {
private TypedBytesOutput out;
private TypedBytesRecordOutput() {}
private void setTypedBytesOutput(TypedBytesOutput out) {
this.out = out;
}
private static final ThreadLocal<TypedBytesRecordOutput> TB_OUT =
new ThreadLocal<TypedBytesRecordOutput>() {
@Override
protected TypedBytesRecordOutput initialValue() {
return new TypedBytesRecordOutput();
}
};
/**
* Get a thread-local typed bytes record output for the supplied
* {@link TypedBytesOutput}.
*
* @param out typed bytes output object
* @return typed bytes record output corresponding to the supplied
* {@link TypedBytesOutput}.
*/
public static TypedBytesRecordOutput get(TypedBytesOutput out) {
TypedBytesRecordOutput bout = TB_OUT.get();
bout.setTypedBytesOutput(out);
return bout;
}
/**
* Get a thread-local typed bytes record output for the supplied
* {@link DataOutput}.
*
* @param out data output object
* @return typed bytes record output corresponding to the supplied
* {@link DataOutput}.
*/
public static TypedBytesRecordOutput get(DataOutput out) {
return get(TypedBytesOutput.get(out));
}
/** Creates a new instance of TypedBytesRecordOutput. */
public TypedBytesRecordOutput(TypedBytesOutput out) {
this.out = out;
}
/** Creates a new instance of TypedBytesRecordOutput. */
public TypedBytesRecordOutput(DataOutput out) {
this(new TypedBytesOutput(out));
}
public void writeBool(boolean b, String tag) throws IOException {
out.writeBool(b);
}
public void writeBuffer(Buffer buf, String tag) throws IOException {
out.writeBytes(buf.get());
}
public void writeByte(byte b, String tag) throws IOException {
out.writeByte(b);
}
public void writeDouble(double d, String tag) throws IOException {
out.writeDouble(d);
}
public void writeFloat(float f, String tag) throws IOException {
out.writeFloat(f);
}
public void writeInt(int i, String tag) throws IOException {
out.writeInt(i);
}
public void writeLong(long l, String tag) throws IOException {
out.writeLong(l);
}
public void writeString(String s, String tag) throws IOException {
out.writeString(s);
}
public void startRecord(Record r, String tag) throws IOException {
out.writeListHeader();
}
public void startVector(ArrayList v, String tag) throws IOException {
out.writeVectorHeader(v.size());
}
public void startMap(TreeMap m, String tag) throws IOException {
out.writeMapHeader(m.size());
}
public void endRecord(Record r, String tag) throws IOException {
out.writeListFooter();
}
public void endVector(ArrayList v, String tag) throws IOException {}
public void endMap(TreeMap m, String tag) throws IOException {}
}
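// Illustrative usage sketch (not part of the original file): drives the
// RecordOutput interface directly to emit a simple "record" as a typed bytes
// list. The class name RecordOutputUsageSketch and the field values written
// below are assumptions made for this example; the Record argument of
// startRecord/endRecord is unused by this implementation, so null is passed.
class RecordOutputUsageSketch {
  public static void main(String[] args) throws IOException {
    java.io.ByteArrayOutputStream baos = new java.io.ByteArrayOutputStream();
    TypedBytesRecordOutput rout =
        TypedBytesRecordOutput.get(new java.io.DataOutputStream(baos));
    rout.startRecord(null, "rec");    // writes a LIST header
    rout.writeInt(42, "id");          // each field carries its own type code
    rout.writeString("example", "name");
    rout.endRecord(null, "rec");      // writes the list footer (MARKER)
    System.out.println("serialized record is " + baos.size() + " bytes");
  }
}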
| 4,015 | 27.685714 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesWritableInput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.typedbytes;
import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.ByteWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.SortedMapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.VIntWritable;
import org.apache.hadoop.io.VLongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.util.ReflectionUtils;
/**
* Provides functionality for reading typed bytes as Writable objects.
*
* @see TypedBytesInput
*/
public class TypedBytesWritableInput implements Configurable {
private TypedBytesInput in;
private Configuration conf;
private TypedBytesWritableInput() {
conf = new Configuration();
}
private void setTypedBytesInput(TypedBytesInput in) {
this.in = in;
}
private static final ThreadLocal<TypedBytesWritableInput> TB_IN =
new ThreadLocal<TypedBytesWritableInput>() {
@Override
protected TypedBytesWritableInput initialValue() {
return new TypedBytesWritableInput();
}
};
/**
* Get a thread-local typed bytes writable input for the supplied
* {@link TypedBytesInput}.
*
* @param in typed bytes input object
* @return typed bytes writable input corresponding to the supplied
* {@link TypedBytesInput}.
*/
public static TypedBytesWritableInput get(TypedBytesInput in) {
TypedBytesWritableInput bin = TB_IN.get();
bin.setTypedBytesInput(in);
return bin;
}
/**
* Get a thread-local typed bytes writable input for the supplied
* {@link DataInput}.
*
* @param in data input object
* @return typed bytes writable input corresponding to the supplied
* {@link DataInput}.
*/
public static TypedBytesWritableInput get(DataInput in) {
return get(TypedBytesInput.get(in));
}
/** Creates a new instance of TypedBytesWritableInput. */
public TypedBytesWritableInput(TypedBytesInput in) {
this();
this.in = in;
}
/** Creates a new instance of TypedBytesWritableInput. */
public TypedBytesWritableInput(DataInput din) {
this(new TypedBytesInput(din));
}
public Writable read() throws IOException {
Type type = in.readType();
if (type == null) {
return null;
}
switch (type) {
case BYTES:
return readBytes();
case BYTE:
return readByte();
case BOOL:
return readBoolean();
case INT:
return readVInt();
case LONG:
return readVLong();
case FLOAT:
return readFloat();
case DOUBLE:
return readDouble();
case STRING:
return readText();
case VECTOR:
return readArray();
case MAP:
return readMap();
case WRITABLE:
return readWritable();
default:
throw new RuntimeException("unknown type");
}
}
public Class<? extends Writable> readType() throws IOException {
Type type = in.readType();
if (type == null) {
return null;
}
switch (type) {
case BYTES:
return BytesWritable.class;
case BYTE:
return ByteWritable.class;
case BOOL:
return BooleanWritable.class;
case INT:
return VIntWritable.class;
case LONG:
return VLongWritable.class;
case FLOAT:
return FloatWritable.class;
case DOUBLE:
return DoubleWritable.class;
case STRING:
return Text.class;
case VECTOR:
return ArrayWritable.class;
case MAP:
return MapWritable.class;
case WRITABLE:
return Writable.class;
default:
throw new RuntimeException("unknown type");
}
}
public BytesWritable readBytes(BytesWritable bw) throws IOException {
byte[] bytes = in.readBytes();
if (bw == null) {
bw = new BytesWritable(bytes);
} else {
bw.set(bytes, 0, bytes.length);
}
return bw;
}
public BytesWritable readBytes() throws IOException {
return readBytes(null);
}
public ByteWritable readByte(ByteWritable bw) throws IOException {
if (bw == null) {
bw = new ByteWritable();
}
bw.set(in.readByte());
return bw;
}
public ByteWritable readByte() throws IOException {
return readByte(null);
}
public BooleanWritable readBoolean(BooleanWritable bw) throws IOException {
if (bw == null) {
bw = new BooleanWritable();
}
bw.set(in.readBool());
return bw;
}
public BooleanWritable readBoolean() throws IOException {
return readBoolean(null);
}
public IntWritable readInt(IntWritable iw) throws IOException {
if (iw == null) {
iw = new IntWritable();
}
iw.set(in.readInt());
return iw;
}
public IntWritable readInt() throws IOException {
return readInt(null);
}
public VIntWritable readVInt(VIntWritable iw) throws IOException {
if (iw == null) {
iw = new VIntWritable();
}
iw.set(in.readInt());
return iw;
}
public VIntWritable readVInt() throws IOException {
return readVInt(null);
}
public LongWritable readLong(LongWritable lw) throws IOException {
if (lw == null) {
lw = new LongWritable();
}
lw.set(in.readLong());
return lw;
}
public LongWritable readLong() throws IOException {
return readLong(null);
}
public VLongWritable readVLong(VLongWritable lw) throws IOException {
if (lw == null) {
lw = new VLongWritable();
}
lw.set(in.readLong());
return lw;
}
public VLongWritable readVLong() throws IOException {
return readVLong(null);
}
public FloatWritable readFloat(FloatWritable fw) throws IOException {
if (fw == null) {
fw = new FloatWritable();
}
fw.set(in.readFloat());
return fw;
}
public FloatWritable readFloat() throws IOException {
return readFloat(null);
}
public DoubleWritable readDouble(DoubleWritable dw) throws IOException {
if (dw == null) {
dw = new DoubleWritable();
}
dw.set(in.readDouble());
return dw;
}
public DoubleWritable readDouble() throws IOException {
return readDouble(null);
}
public Text readText(Text t) throws IOException {
if (t == null) {
t = new Text();
}
t.set(in.readString());
return t;
}
public Text readText() throws IOException {
return readText(null);
}
public ArrayWritable readArray(ArrayWritable aw) throws IOException {
if (aw == null) {
aw = new ArrayWritable(TypedBytesWritable.class);
} else if (!aw.getValueClass().equals(TypedBytesWritable.class)) {
throw new RuntimeException("value class has to be TypedBytesWritable");
}
int length = in.readVectorHeader();
Writable[] writables = new Writable[length];
for (int i = 0; i < length; i++) {
writables[i] = new TypedBytesWritable(in.readRaw());
}
aw.set(writables);
return aw;
}
public ArrayWritable readArray() throws IOException {
return readArray(null);
}
public MapWritable readMap(MapWritable mw) throws IOException {
if (mw == null) {
mw = new MapWritable();
}
int length = in.readMapHeader();
for (int i = 0; i < length; i++) {
Writable key = read();
Writable value = read();
mw.put(key, value);
}
return mw;
}
public MapWritable readMap() throws IOException {
return readMap(null);
}
public SortedMapWritable readSortedMap(SortedMapWritable mw)
throws IOException {
if (mw == null) {
mw = new SortedMapWritable();
}
int length = in.readMapHeader();
for (int i = 0; i < length; i++) {
WritableComparable key = (WritableComparable) read();
Writable value = read();
mw.put(key, value);
}
return mw;
}
public SortedMapWritable readSortedMap() throws IOException {
return readSortedMap(null);
}
public Writable readWritable(Writable writable) throws IOException {
ByteArrayInputStream bais = new ByteArrayInputStream(in.readBytes());
DataInputStream dis = new DataInputStream(bais);
String className = WritableUtils.readString(dis);
if (writable == null) {
try {
Class<? extends Writable> cls =
conf.getClassByName(className).asSubclass(Writable.class);
writable = (Writable) ReflectionUtils.newInstance(cls, conf);
} catch (ClassNotFoundException e) {
throw new IOException(e);
}
} else if (!writable.getClass().getName().equals(className)) {
throw new IOException("wrong Writable class given");
}
writable.readFields(dis);
return writable;
}
public Writable readWritable() throws IOException {
return readWritable(null);
}
public Configuration getConf() {
return conf;
}
public void setConf(Configuration conf) {
this.conf = conf;
}
}
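// Illustrative round-trip sketch (not part of the original file): writes two
// Writables as typed bytes with TypedBytesWritableOutput and reads them back.
// The class name WritableInputUsageSketch is an assumption made for this example.
class WritableInputUsageSketch {
  public static void main(String[] args) throws IOException {
    java.io.ByteArrayOutputStream baos = new java.io.ByteArrayOutputStream();
    TypedBytesWritableOutput out =
        TypedBytesWritableOutput.get(new java.io.DataOutputStream(baos));
    out.write(new IntWritable(7));    // emitted with the INT type code
    out.write(new Text("hello"));     // emitted with the STRING type code
    TypedBytesWritableInput in = TypedBytesWritableInput.get(
        new DataInputStream(new ByteArrayInputStream(baos.toByteArray())));
    Writable first = in.read();       // INT comes back as a VIntWritable
    Writable second = in.read();      // STRING comes back as a Text
    System.out.println(first + " / " + second);
  }
}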
| 10,111 | 25.402089 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesWritableOutput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.typedbytes;
import java.io.ByteArrayOutputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.ByteWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.SortedMapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.VIntWritable;
import org.apache.hadoop.io.VLongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.record.Record;
/**
* Provides functionality for writing Writable objects as typed bytes.
*
* @see TypedBytesOutput
*/
public class TypedBytesWritableOutput {
private TypedBytesOutput out;
private TypedBytesWritableOutput() {}
private void setTypedBytesOutput(TypedBytesOutput out) {
this.out = out;
}
private static final ThreadLocal<TypedBytesWritableOutput> TB_OUT =
new ThreadLocal<TypedBytesWritableOutput>() {
@Override
protected TypedBytesWritableOutput initialValue() {
return new TypedBytesWritableOutput();
}
};
/**
* Get a thread-local typed bytes writable output for the supplied
* {@link TypedBytesOutput}.
*
* @param out typed bytes output object
* @return typed bytes writable output corresponding to the supplied
* {@link TypedBytesOutput}.
*/
public static TypedBytesWritableOutput get(TypedBytesOutput out) {
TypedBytesWritableOutput bout = TB_OUT.get();
bout.setTypedBytesOutput(out);
return bout;
}
/**
* Get a thread-local typed bytes writable output for the supplied
* {@link DataOutput}.
*
* @param out data output object
* @return typed bytes writable output corresponding to the supplied
* {@link DataOutput}.
*/
public static TypedBytesWritableOutput get(DataOutput out) {
return get(TypedBytesOutput.get(out));
}
/** Creates a new instance of TypedBytesWritableOutput. */
public TypedBytesWritableOutput(TypedBytesOutput out) {
this();
this.out = out;
}
/** Creates a new instance of TypedBytesWritableOutput. */
public TypedBytesWritableOutput(DataOutput dout) {
this(new TypedBytesOutput(dout));
}
public void write(Writable w) throws IOException {
if (w instanceof TypedBytesWritable) {
writeTypedBytes((TypedBytesWritable) w);
} else if (w instanceof BytesWritable) {
writeBytes((BytesWritable) w);
} else if (w instanceof ByteWritable) {
writeByte((ByteWritable) w);
} else if (w instanceof BooleanWritable) {
writeBoolean((BooleanWritable) w);
} else if (w instanceof IntWritable) {
writeInt((IntWritable) w);
} else if (w instanceof VIntWritable) {
writeVInt((VIntWritable) w);
} else if (w instanceof LongWritable) {
writeLong((LongWritable) w);
} else if (w instanceof VLongWritable) {
writeVLong((VLongWritable) w);
} else if (w instanceof FloatWritable) {
writeFloat((FloatWritable) w);
} else if (w instanceof DoubleWritable) {
writeDouble((DoubleWritable) w);
} else if (w instanceof Text) {
writeText((Text) w);
} else if (w instanceof ArrayWritable) {
writeArray((ArrayWritable) w);
} else if (w instanceof MapWritable) {
writeMap((MapWritable) w);
} else if (w instanceof SortedMapWritable) {
writeSortedMap((SortedMapWritable) w);
} else if (w instanceof Record) {
writeRecord((Record) w);
} else {
writeWritable(w); // last resort
}
}
public void writeTypedBytes(TypedBytesWritable tbw) throws IOException {
out.writeRaw(tbw.getBytes(), 0, tbw.getLength());
}
public void writeBytes(BytesWritable bw) throws IOException {
byte[] bytes = Arrays.copyOfRange(bw.getBytes(), 0, bw.getLength());
out.writeBytes(bytes);
}
public void writeByte(ByteWritable bw) throws IOException {
out.writeByte(bw.get());
}
public void writeBoolean(BooleanWritable bw) throws IOException {
out.writeBool(bw.get());
}
public void writeInt(IntWritable iw) throws IOException {
out.writeInt(iw.get());
}
public void writeVInt(VIntWritable viw) throws IOException {
out.writeInt(viw.get());
}
public void writeLong(LongWritable lw) throws IOException {
out.writeLong(lw.get());
}
public void writeVLong(VLongWritable vlw) throws IOException {
out.writeLong(vlw.get());
}
public void writeFloat(FloatWritable fw) throws IOException {
out.writeFloat(fw.get());
}
public void writeDouble(DoubleWritable dw) throws IOException {
out.writeDouble(dw.get());
}
public void writeText(Text t) throws IOException {
out.writeString(t.toString());
}
public void writeArray(ArrayWritable aw) throws IOException {
Writable[] writables = aw.get();
out.writeVectorHeader(writables.length);
for (Writable writable : writables) {
write(writable);
}
}
public void writeMap(MapWritable mw) throws IOException {
out.writeMapHeader(mw.size());
for (Map.Entry<Writable, Writable> entry : mw.entrySet()) {
write(entry.getKey());
write(entry.getValue());
}
}
public void writeSortedMap(SortedMapWritable smw) throws IOException {
out.writeMapHeader(smw.size());
for (Map.Entry<WritableComparable, Writable> entry : smw.entrySet()) {
write(entry.getKey());
write(entry.getValue());
}
}
public void writeRecord(Record r) throws IOException {
r.serialize(TypedBytesRecordOutput.get(out));
}
public void writeWritable(Writable w) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(baos);
WritableUtils.writeString(dos, w.getClass().getName());
w.write(dos);
dos.close();
out.writeBytes(baos.toByteArray(), Type.WRITABLE.code);
}
}
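// Illustrative sketch (not part of the original file): nested Writables such as
// MapWritable are written recursively, so every key and value gets its own type
// code. The class name WritableOutputUsageSketch is an assumption made for this
// example.
class WritableOutputUsageSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    TypedBytesWritableOutput out =
        TypedBytesWritableOutput.get(new DataOutputStream(baos));
    MapWritable map = new MapWritable();
    map.put(new Text("answer"), new IntWritable(42));
    out.write(map);  // MAP header, then a STRING key and an INT value
    System.out.println("typed bytes length: " + baos.size());
  }
}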
| 7,079 | 30.466667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesRecordInput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.typedbytes;
import java.io.DataInput;
import java.io.IOException;
import org.apache.hadoop.record.Buffer;
import org.apache.hadoop.record.Index;
import org.apache.hadoop.record.RecordInput;
/**
* Deserializer for records that reads typed bytes.
*/
public class TypedBytesRecordInput implements RecordInput {
private TypedBytesInput in;
private TypedBytesRecordInput() {}
private void setTypedBytesInput(TypedBytesInput in) {
this.in = in;
}
private static final ThreadLocal<TypedBytesRecordInput> TB_IN =
new ThreadLocal<TypedBytesRecordInput>() {
@Override
protected TypedBytesRecordInput initialValue() {
return new TypedBytesRecordInput();
}
};
/**
* Get a thread-local typed bytes record input for the supplied
* {@link TypedBytesInput}.
*
* @param in typed bytes input object
* @return typed bytes record input corresponding to the supplied
* {@link TypedBytesInput}.
*/
public static TypedBytesRecordInput get(TypedBytesInput in) {
TypedBytesRecordInput bin = TB_IN.get();
bin.setTypedBytesInput(in);
return bin;
}
/**
* Get a thread-local typed bytes record input for the supplied
* {@link DataInput}.
*
* @param in data input object
* @return typed bytes record input corresponding to the supplied
* {@link DataInput}.
*/
public static TypedBytesRecordInput get(DataInput in) {
return get(TypedBytesInput.get(in));
}
/** Creates a new instance of TypedBytesRecordInput. */
public TypedBytesRecordInput(TypedBytesInput in) {
this.in = in;
}
/** Creates a new instance of TypedBytesRecordInput. */
public TypedBytesRecordInput(DataInput in) {
this(new TypedBytesInput(in));
}
public boolean readBool(String tag) throws IOException {
in.skipType();
return in.readBool();
}
public Buffer readBuffer(String tag) throws IOException {
in.skipType();
return new Buffer(in.readBytes());
}
public byte readByte(String tag) throws IOException {
in.skipType();
return in.readByte();
}
public double readDouble(String tag) throws IOException {
in.skipType();
return in.readDouble();
}
public float readFloat(String tag) throws IOException {
in.skipType();
return in.readFloat();
}
public int readInt(String tag) throws IOException {
in.skipType();
return in.readInt();
}
public long readLong(String tag) throws IOException {
in.skipType();
return in.readLong();
}
public String readString(String tag) throws IOException {
in.skipType();
return in.readString();
}
public void startRecord(String tag) throws IOException {
in.skipType();
}
public Index startVector(String tag) throws IOException {
in.skipType();
return new TypedBytesIndex(in.readVectorHeader());
}
public Index startMap(String tag) throws IOException {
in.skipType();
return new TypedBytesIndex(in.readMapHeader());
}
public void endRecord(String tag) throws IOException {}
public void endVector(String tag) throws IOException {}
public void endMap(String tag) throws IOException {}
private static final class TypedBytesIndex implements Index {
private int nelems;
private TypedBytesIndex(int nelems) {
this.nelems = nelems;
}
public boolean done() {
return (nelems <= 0);
}
public void incr() {
nelems--;
}
}
}
| 4,270 | 25.364198 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesOutput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.typedbytes;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Map.Entry;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.record.Buffer;
/**
* Provides functionality for writing typed bytes.
*/
public class TypedBytesOutput {
private DataOutput out;
private TypedBytesOutput() {}
private void setDataOutput(DataOutput out) {
this.out = out;
}
private static final ThreadLocal<TypedBytesOutput> TB_OUT =
new ThreadLocal<TypedBytesOutput>() {
@Override
protected TypedBytesOutput initialValue() {
return new TypedBytesOutput();
}
};
/**
* Get a thread-local typed bytes output for the supplied {@link DataOutput}.
*
* @param out data output object
* @return typed bytes output corresponding to the supplied
* {@link DataOutput}.
*/
public static TypedBytesOutput get(DataOutput out) {
TypedBytesOutput bout = TB_OUT.get();
bout.setDataOutput(out);
return bout;
}
/** Creates a new instance of TypedBytesOutput. */
public TypedBytesOutput(DataOutput out) {
this.out = out;
}
/**
* Writes a Java object as a typed bytes sequence.
*
* @param obj the object to be written
* @throws IOException
*/
public void write(Object obj) throws IOException {
if (obj instanceof Buffer) {
writeBytes((Buffer) obj);
} else if (obj instanceof Byte) {
writeByte((Byte) obj);
} else if (obj instanceof Boolean) {
writeBool((Boolean) obj);
} else if (obj instanceof Integer) {
writeInt((Integer) obj);
} else if (obj instanceof Long) {
writeLong((Long) obj);
} else if (obj instanceof Float) {
writeFloat((Float) obj);
} else if (obj instanceof Double) {
writeDouble((Double) obj);
} else if (obj instanceof String) {
writeString((String) obj);
} else if (obj instanceof ArrayList) {
writeVector((ArrayList) obj);
} else if (obj instanceof List) {
writeList((List) obj);
} else if (obj instanceof Map) {
writeMap((Map) obj);
} else {
throw new RuntimeException("cannot write objects of this type");
}
}
/**
* Writes a raw sequence of typed bytes.
*
* @param bytes the bytes to be written
* @throws IOException
*/
public void writeRaw(byte[] bytes) throws IOException {
out.write(bytes);
}
/**
* Writes a raw sequence of typed bytes.
*
* @param bytes the bytes to be written
* @param offset an offset in the given array
* @param length number of bytes from the given array to write
* @throws IOException
*/
public void writeRaw(byte[] bytes, int offset, int length)
throws IOException {
out.write(bytes, offset, length);
}
/**
* Writes a bytes array as a typed bytes sequence, using a given typecode
* and length.
*
* @param bytes the bytes array to be written
* @param code the typecode to use
* @param length the number of bytes to write, starting from position 0
* @throws IOException
*/
public void writeBytes(byte[] bytes, int code, int length) throws IOException {
out.write(code);
out.writeInt(length);
out.write(bytes, 0, length);
}
/**
* Writes a bytes array as a typed bytes sequence, using a given typecode.
*
* @param bytes the bytes array to be written
* @param code the typecode to use
* @throws IOException
*/
public void writeBytes(byte[] bytes, int code) throws IOException {
writeBytes(bytes, code, bytes.length);
}
/**
* Writes a bytes array as a typed bytes sequence.
*
* @param bytes the bytes array to be written
* @throws IOException
*/
public void writeBytes(byte[] bytes) throws IOException {
writeBytes(bytes, Type.BYTES.code);
}
/**
* Writes a bytes buffer as a typed bytes sequence.
*
* @param buffer the bytes buffer to be written
* @throws IOException
*/
public void writeBytes(Buffer buffer) throws IOException {
writeBytes(buffer.get(), Type.BYTES.code, buffer.getCount());
}
/**
* Writes a byte as a typed bytes sequence.
*
* @param b the byte to be written
* @throws IOException
*/
public void writeByte(byte b) throws IOException {
out.write(Type.BYTE.code);
out.write(b);
}
/**
* Writes a boolean as a typed bytes sequence.
*
* @param b the boolean to be written
* @throws IOException
*/
public void writeBool(boolean b) throws IOException {
out.write(Type.BOOL.code);
out.writeBoolean(b);
}
/**
* Writes an integer as a typed bytes sequence.
*
* @param i the integer to be written
* @throws IOException
*/
public void writeInt(int i) throws IOException {
out.write(Type.INT.code);
out.writeInt(i);
}
/**
* Writes a long as a typed bytes sequence.
*
* @param l the long to be written
* @throws IOException
*/
public void writeLong(long l) throws IOException {
out.write(Type.LONG.code);
out.writeLong(l);
}
/**
* Writes a float as a typed bytes sequence.
*
* @param f the float to be written
* @throws IOException
*/
public void writeFloat(float f) throws IOException {
out.write(Type.FLOAT.code);
out.writeFloat(f);
}
/**
* Writes a double as a typed bytes sequence.
*
* @param d the double to be written
* @throws IOException
*/
public void writeDouble(double d) throws IOException {
out.write(Type.DOUBLE.code);
out.writeDouble(d);
}
/**
* Writes a string as a typed bytes sequence.
*
* @param s the string to be written
* @throws IOException
*/
public void writeString(String s) throws IOException {
out.write(Type.STRING.code);
WritableUtils.writeString(out, s);
}
/**
* Writes a vector as a typed bytes sequence.
*
* @param vector the vector to be written
* @throws IOException
*/
public void writeVector(ArrayList vector) throws IOException {
writeVectorHeader(vector.size());
for (Object obj : vector) {
write(obj);
}
}
/**
* Writes a vector header.
*
* @param length the number of elements in the vector
* @throws IOException
*/
public void writeVectorHeader(int length) throws IOException {
out.write(Type.VECTOR.code);
out.writeInt(length);
}
/**
* Writes a list as a typed bytes sequence.
*
* @param list the list to be written
* @throws IOException
*/
public void writeList(List list) throws IOException {
writeListHeader();
for (Object obj : list) {
write(obj);
}
writeListFooter();
}
/**
* Writes a list header.
*
* @throws IOException
*/
public void writeListHeader() throws IOException {
out.write(Type.LIST.code);
}
/**
* Writes a list footer.
*
* @throws IOException
*/
public void writeListFooter() throws IOException {
out.write(Type.MARKER.code);
}
/**
* Writes a map as a typed bytes sequence.
*
* @param map the map to be written
* @throws IOException
*/
@SuppressWarnings("unchecked")
public void writeMap(Map map) throws IOException {
writeMapHeader(map.size());
Set<Entry> entries = map.entrySet();
for (Entry entry : entries) {
write(entry.getKey());
write(entry.getValue());
}
}
/**
* Writes a map header.
*
* @param length the number of key-value pairs in the map
* @throws IOException
*/
public void writeMapHeader(int length) throws IOException {
out.write(Type.MAP.code);
out.writeInt(length);
}
}
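// Illustrative sketch (not part of the original file): serializes plain Java
// values, including a nested ArrayList, as a typed bytes stream. The class name
// TypedBytesOutputUsageSketch is an assumption made for this example.
class TypedBytesOutputUsageSketch {
  public static void main(String[] args) throws IOException {
    java.io.ByteArrayOutputStream baos = new java.io.ByteArrayOutputStream();
    TypedBytesOutput out =
        TypedBytesOutput.get(new java.io.DataOutputStream(baos));
    out.write(12345L);            // LONG type code followed by 8 bytes
    out.write("typed bytes");     // STRING type code, length, then UTF-8 bytes
    ArrayList<Object> vector = new ArrayList<Object>();
    vector.add(Boolean.TRUE);
    vector.add(3.14);
    out.write(vector);            // VECTOR header, then one typed value per element
    System.out.println("wrote " + baos.size() + " bytes");
  }
}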
| 8,527 | 24.456716 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesInput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.typedbytes;
import java.io.DataInput;
import java.io.EOFException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.record.Buffer;
/**
* Provides functionality for reading typed bytes.
*/
public class TypedBytesInput {
private DataInput in;
private TypedBytesInput() {}
private void setDataInput(DataInput in) {
this.in = in;
}
private static final ThreadLocal<TypedBytesInput> TB_IN =
new ThreadLocal<TypedBytesInput>() {
@Override
protected TypedBytesInput initialValue() {
return new TypedBytesInput();
}
};
/**
* Get a thread-local typed bytes input for the supplied {@link DataInput}.
* @param in data input object
* @return typed bytes input corresponding to the supplied {@link DataInput}.
*/
public static TypedBytesInput get(DataInput in) {
TypedBytesInput bin = TB_IN.get();
bin.setDataInput(in);
return bin;
}
/** Creates a new instance of TypedBytesInput. */
public TypedBytesInput(DataInput in) {
this.in = in;
}
/**
* Reads a typed bytes sequence and converts it to a Java object. The first
* byte is interpreted as a type code, and then the right number of
* subsequent bytes are read depending on the obtained type.
* @return the obtained object or null when the end of the file is reached
* @throws IOException
*/
public Object read() throws IOException {
int code = 1;
try {
code = in.readUnsignedByte();
} catch (EOFException eof) {
return null;
}
if (code == Type.BYTES.code) {
return new Buffer(readBytes());
} else if (code == Type.BYTE.code) {
return readByte();
} else if (code == Type.BOOL.code) {
return readBool();
} else if (code == Type.INT.code) {
return readInt();
} else if (code == Type.LONG.code) {
return readLong();
} else if (code == Type.FLOAT.code) {
return readFloat();
} else if (code == Type.DOUBLE.code) {
return readDouble();
} else if (code == Type.STRING.code) {
return readString();
} else if (code == Type.VECTOR.code) {
return readVector();
} else if (code == Type.LIST.code) {
return readList();
} else if (code == Type.MAP.code) {
return readMap();
} else if (code == Type.MARKER.code) {
return null;
} else if (50 <= code && code <= 200) { // application-specific typecodes
return new Buffer(readBytes());
} else {
throw new RuntimeException("unknown type");
}
}
/**
* Reads a typed bytes sequence. The first byte is interpreted as a type code,
* and then the right number of subsequent bytes are read depending on the
* obtained type.
*
* @return the obtained typed bytes sequence or null when the end of the file
* is reached
* @throws IOException
*/
public byte[] readRaw() throws IOException {
int code = -1;
try {
code = in.readUnsignedByte();
} catch (EOFException eof) {
return null;
}
if (code == Type.BYTES.code) {
return readRawBytes();
} else if (code == Type.BYTE.code) {
return readRawByte();
} else if (code == Type.BOOL.code) {
return readRawBool();
} else if (code == Type.INT.code) {
return readRawInt();
} else if (code == Type.LONG.code) {
return readRawLong();
} else if (code == Type.FLOAT.code) {
return readRawFloat();
} else if (code == Type.DOUBLE.code) {
return readRawDouble();
} else if (code == Type.STRING.code) {
return readRawString();
} else if (code == Type.VECTOR.code) {
return readRawVector();
} else if (code == Type.LIST.code) {
return readRawList();
} else if (code == Type.MAP.code) {
return readRawMap();
} else if (code == Type.MARKER.code) {
return null;
} else if (50 <= code && code <= 200) { // application-specific typecodes
return readRawBytes(code);
} else {
throw new RuntimeException("unknown type");
}
}
/**
* Reads a type byte and returns the corresponding {@link Type}.
* @return the obtained Type or null when the end of the file is reached
* @throws IOException
*/
public Type readType() throws IOException {
int code = -1;
try {
code = in.readUnsignedByte();
} catch (EOFException eof) {
return null;
}
for (Type type : Type.values()) {
if (type.code == code) {
return type;
}
}
return null;
}
/**
* Skips a type byte.
* @return true iff the end of the file was not reached
* @throws IOException
*/
public boolean skipType() throws IOException {
try {
in.readByte();
return true;
} catch (EOFException eof) {
return false;
}
}
/**
* Reads the bytes following a <code>Type.BYTES</code> code.
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readBytes() throws IOException {
int length = in.readInt();
byte[] bytes = new byte[length];
in.readFully(bytes);
return bytes;
}
/**
* Reads the raw bytes following a custom code.
* @param code the custom type code
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawBytes(int code) throws IOException {
int length = in.readInt();
byte[] bytes = new byte[5 + length];
bytes[0] = (byte) code;
bytes[1] = (byte) (0xff & (length >> 24));
bytes[2] = (byte) (0xff & (length >> 16));
bytes[3] = (byte) (0xff & (length >> 8));
bytes[4] = (byte) (0xff & length);
in.readFully(bytes, 5, length);
return bytes;
}
/**
* Reads the raw bytes following a <code>Type.BYTES</code> code.
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawBytes() throws IOException {
return readRawBytes(Type.BYTES.code);
}
/**
* Reads the byte following a <code>Type.BYTE</code> code.
* @return the obtained byte
* @throws IOException
*/
public byte readByte() throws IOException {
return in.readByte();
}
/**
* Reads the raw byte following a <code>Type.BYTE</code> code.
* @return the obtained byte
* @throws IOException
*/
public byte[] readRawByte() throws IOException {
byte[] bytes = new byte[2];
bytes[0] = (byte) Type.BYTE.code;
in.readFully(bytes, 1, 1);
return bytes;
}
/**
* Reads the boolean following a <code>Type.BOOL</code> code.
* @return the obtained boolean
* @throws IOException
*/
public boolean readBool() throws IOException {
return in.readBoolean();
}
/**
* Reads the raw bytes following a <code>Type.BOOL</code> code.
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawBool() throws IOException {
byte[] bytes = new byte[2];
bytes[0] = (byte) Type.BOOL.code;
in.readFully(bytes, 1, 1);
return bytes;
}
/**
* Reads the integer following a <code>Type.INT</code> code.
* @return the obtained integer
* @throws IOException
*/
public int readInt() throws IOException {
return in.readInt();
}
/**
* Reads the raw bytes following a <code>Type.INT</code> code.
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawInt() throws IOException {
byte[] bytes = new byte[5];
bytes[0] = (byte) Type.INT.code;
in.readFully(bytes, 1, 4);
return bytes;
}
/**
* Reads the long following a <code>Type.LONG</code> code.
* @return the obtained long
* @throws IOException
*/
public long readLong() throws IOException {
return in.readLong();
}
/**
* Reads the raw bytes following a <code>Type.LONG</code> code.
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawLong() throws IOException {
byte[] bytes = new byte[9];
bytes[0] = (byte) Type.LONG.code;
in.readFully(bytes, 1, 8);
return bytes;
}
/**
* Reads the float following a <code>Type.FLOAT</code> code.
* @return the obtained float
* @throws IOException
*/
public float readFloat() throws IOException {
return in.readFloat();
}
/**
* Reads the raw bytes following a <code>Type.FLOAT</code> code.
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawFloat() throws IOException {
byte[] bytes = new byte[5];
bytes[0] = (byte) Type.FLOAT.code;
in.readFully(bytes, 1, 4);
return bytes;
}
/**
* Reads the double following a <code>Type.DOUBLE</code> code.
* @return the obtained double
* @throws IOException
*/
public double readDouble() throws IOException {
return in.readDouble();
}
/**
* Reads the raw bytes following a <code>Type.DOUBLE</code> code.
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawDouble() throws IOException {
byte[] bytes = new byte[9];
bytes[0] = (byte) Type.DOUBLE.code;
in.readFully(bytes, 1, 8);
return bytes;
}
/**
* Reads the string following a <code>Type.STRING</code> code.
* @return the obtained string
* @throws IOException
*/
public String readString() throws IOException {
return WritableUtils.readString(in);
}
/**
* Reads the raw bytes following a <code>Type.STRING</code> code.
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawString() throws IOException {
int length = in.readInt();
byte[] bytes = new byte[5 + length];
bytes[0] = (byte) Type.STRING.code;
bytes[1] = (byte) (0xff & (length >> 24));
bytes[2] = (byte) (0xff & (length >> 16));
bytes[3] = (byte) (0xff & (length >> 8));
bytes[4] = (byte) (0xff & length);
in.readFully(bytes, 5, length);
return bytes;
}
/**
* Reads the vector following a <code>Type.VECTOR</code> code.
* @return the obtained vector
* @throws IOException
*/
@SuppressWarnings("unchecked")
public ArrayList readVector() throws IOException {
int length = readVectorHeader();
ArrayList result = new ArrayList(length);
for (int i = 0; i < length; i++) {
result.add(read());
}
return result;
}
/**
* Reads the raw bytes following a <code>Type.VECTOR</code> code.
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawVector() throws IOException {
Buffer buffer = new Buffer();
int length = readVectorHeader();
buffer.append(new byte[] {
(byte) Type.VECTOR.code,
(byte) (0xff & (length >> 24)), (byte) (0xff & (length >> 16)),
(byte) (0xff & (length >> 8)), (byte) (0xff & length)
});
for (int i = 0; i < length; i++) {
buffer.append(readRaw());
}
return buffer.get();
}
/**
* Reads the header following a <code>Type.VECTOR</code> code.
* @return the number of elements in the vector
* @throws IOException
*/
public int readVectorHeader() throws IOException {
return in.readInt();
}
/**
* Reads the list following a <code>Type.LIST</code> code.
* @return the obtained list
* @throws IOException
*/
@SuppressWarnings("unchecked")
public List readList() throws IOException {
List list = new ArrayList();
Object obj = read();
while (obj != null) {
list.add(obj);
obj = read();
}
return list;
}
/**
* Reads the raw bytes following a <code>Type.LIST</code> code.
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawList() throws IOException {
Buffer buffer = new Buffer(new byte[] { (byte) Type.LIST.code });
byte[] bytes = readRaw();
while (bytes != null) {
buffer.append(bytes);
bytes = readRaw();
}
buffer.append(new byte[] { (byte) Type.MARKER.code });
return buffer.get();
}
/**
* Reads the map following a <code>Type.MAP</code> code.
* @return the obtained map
* @throws IOException
*/
@SuppressWarnings("unchecked")
public TreeMap readMap() throws IOException {
int length = readMapHeader();
TreeMap result = new TreeMap();
for (int i = 0; i < length; i++) {
Object key = read();
Object value = read();
result.put(key, value);
}
return result;
}
/**
* Reads the raw bytes following a <code>Type.MAP</code> code.
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawMap() throws IOException {
Buffer buffer = new Buffer();
int length = readMapHeader();
buffer.append(new byte[] {
(byte) Type.MAP.code,
(byte) (0xff & (length >> 24)), (byte) (0xff & (length >> 16)),
(byte) (0xff & (length >> 8)), (byte) (0xff & length)
});
for (int i = 0; i < length; i++) {
buffer.append(readRaw());
buffer.append(readRaw());
}
return buffer.get();
}
/**
* Reads the header following a <code>Type.MAP</code> code.
* @return the number of key-value pairs in the map
* @throws IOException
*/
public int readMapHeader() throws IOException {
return in.readInt();
}
}
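// Illustrative round-trip sketch (not part of the original file): writes a few
// values with TypedBytesOutput and decodes them again with read(), which
// dispatches on the leading type code. The class name TypedBytesInputUsageSketch
// is an assumption made for this example.
class TypedBytesInputUsageSketch {
  public static void main(String[] args) throws IOException {
    java.io.ByteArrayOutputStream baos = new java.io.ByteArrayOutputStream();
    TypedBytesOutput out =
        TypedBytesOutput.get(new java.io.DataOutputStream(baos));
    out.writeInt(7);
    out.writeString("seven");
    TypedBytesInput in = TypedBytesInput.get(new java.io.DataInputStream(
        new java.io.ByteArrayInputStream(baos.toByteArray())));
    Object first = in.read();   // Integer 7
    Object second = in.read();  // String "seven"
    Object end = in.read();     // null once the stream is exhausted
    System.out.println(first + " " + second + " " + end);
  }
}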
| 14,106 | 26.824458 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/Type.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.typedbytes;
/**
* The possible type codes.
*/
public enum Type {
// codes for supported types (< 50):
BYTES(0),
BYTE(1),
BOOL(2),
INT(3),
LONG(4),
FLOAT(5),
DOUBLE(6),
STRING(7),
VECTOR(8),
LIST(9),
MAP(10),
// application-specific codes (50-200):
WRITABLE(50),
// low-level codes (> 200):
MARKER(255);
final int code;
Type(int code) {
this.code = code;
}
}
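// Illustrative sketch (not part of the original file): resolves a raw type code
// read from a stream back to its Type constant, the same linear scan that
// TypedBytesInput.readType() performs. The class name TypeLookupSketch is an
// assumption made for this example.
class TypeLookupSketch {
  static Type fromCode(int code) {
    for (Type type : Type.values()) {
      if (type.code == code) {
        return type;
      }
    }
    return null; // unknown or application-specific code without a constant
  }
  public static void main(String[] args) {
    System.out.println(fromCode(7));   // STRING
    System.out.println(fromCode(255)); // MARKER
  }
}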
| 1,245 | 23.92 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.typedbytes;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.BytesWritable;
/**
* Writable for typed bytes.
*/
public class TypedBytesWritable extends BytesWritable {
/** Create a TypedBytesWritable. */
public TypedBytesWritable() {
super();
}
/** Create a TypedBytesWritable with a given byte array as initial value. */
public TypedBytesWritable(byte[] bytes) {
super(bytes);
}
/** Set the typed bytes from a given Java object. */
public void setValue(Object obj) {
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
TypedBytesOutput tbo = TypedBytesOutput.get(new DataOutputStream(baos));
tbo.write(obj);
byte[] bytes = baos.toByteArray();
set(bytes, 0, bytes.length);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/** Get the typed bytes as a Java object. */
public Object getValue() {
try {
ByteArrayInputStream bais = new ByteArrayInputStream(getBytes());
TypedBytesInput tbi = TypedBytesInput.get(new DataInputStream(bais));
Object obj = tbi.read();
return obj;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/** Get the type code embedded in the first byte. */
public Type getType() {
byte[] bytes = getBytes();
if (bytes == null || bytes.length == 0) {
return null;
}
for (Type type : Type.values()) {
if (type.code == (int) bytes[0]) {
return type;
}
}
return null;
}
/** Generate a suitable string representation. */
public String toString() {
return getValue().toString();
}
}
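// Illustrative sketch (not part of the original file): stores a plain Java value
// in a TypedBytesWritable and reads it back, inspecting the embedded type code
// along the way. The class name TypedBytesWritableUsageSketch is an assumption
// made for this example.
class TypedBytesWritableUsageSketch {
  public static void main(String[] args) {
    TypedBytesWritable writable = new TypedBytesWritable();
    writable.setValue("hello typed bytes");  // serialized with the STRING type code
    System.out.println(writable.getType());  // STRING
    System.out.println(writable.getValue()); // hello typed bytes
  }
}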
| 2,611 | 28.348315 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.io.ByteArrayOutputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintStream;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.StringTokenizer;
import junit.framework.TestCase;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.Builder;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Level;
/**
* A JUnit test for copying files recursively.
*/
@SuppressWarnings("deprecation")
public class TestCopyFiles extends TestCase {
{
((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.StateChange")
).getLogger().setLevel(Level.ERROR);
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ERROR);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ERROR);
((Log4JLogger)DistCpV1.LOG).getLogger().setLevel(Level.ALL);
}
static final URI LOCAL_FS = URI.create("file:///");
private static final Random RAN = new Random();
private static final int NFILES = 20;
private static String TEST_ROOT_DIR =
new Path(System.getProperty("test.build.data","/tmp"))
.toString().replace(' ', '+');
/** class MyFile contains enough information to recreate the contents of
* a single file.
*/
private static class MyFile {
private static Random gen = new Random();
private static final int MAX_LEVELS = 3;
private static final int MAX_SIZE = 8*1024;
private static String[] dirNames = {
"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"
};
private final String name;
private int size = 0;
private long seed = 0L;
MyFile() {
this(gen.nextInt(MAX_LEVELS));
}
MyFile(int nLevels) {
String xname = "";
if (nLevels != 0) {
int[] levels = new int[nLevels];
for (int idx = 0; idx < nLevels; idx++) {
levels[idx] = gen.nextInt(10);
}
StringBuffer sb = new StringBuffer();
for (int idx = 0; idx < nLevels; idx++) {
sb.append(dirNames[levels[idx]]);
sb.append("/");
}
xname = sb.toString();
}
long fidx = gen.nextLong() & Long.MAX_VALUE;
name = xname + Long.toString(fidx);
reset();
}
void reset() {
final int oldsize = size;
do { size = gen.nextInt(MAX_SIZE); } while (oldsize == size);
final long oldseed = seed;
do { seed = gen.nextLong() & Long.MAX_VALUE; } while (oldseed == seed);
}
String getName() { return name; }
int getSize() { return size; }
long getSeed() { return seed; }
}
private static MyFile[] createFiles(URI fsname, String topdir)
throws IOException {
return createFiles(FileSystem.get(fsname, new Configuration()), topdir);
}
/** create NFILES with random names and directory hierarchies
* with random (but reproducible) data in them.
*/
private static MyFile[] createFiles(FileSystem fs, String topdir)
throws IOException {
Path root = new Path(topdir);
MyFile[] files = new MyFile[NFILES];
for (int i = 0; i < NFILES; i++) {
files[i] = createFile(root, fs);
}
return files;
}
static MyFile createFile(Path root, FileSystem fs, int levels)
throws IOException {
MyFile f = levels < 0 ? new MyFile() : new MyFile(levels);
Path p = new Path(root, f.getName());
FSDataOutputStream out = fs.create(p);
byte[] toWrite = new byte[f.getSize()];
new Random(f.getSeed()).nextBytes(toWrite);
out.write(toWrite);
out.close();
FileSystem.LOG.info("created: " + p + ", size=" + f.getSize());
return f;
}
static MyFile createFile(Path root, FileSystem fs) throws IOException {
return createFile(root, fs, -1);
}
private static boolean checkFiles(FileSystem fs, String topdir, MyFile[] files
) throws IOException {
return checkFiles(fs, topdir, files, false);
}
private static boolean checkFiles(FileSystem fs, String topdir, MyFile[] files,
boolean existingOnly) throws IOException {
Path root = new Path(topdir);
for (int idx = 0; idx < files.length; idx++) {
Path fPath = new Path(root, files[idx].getName());
try {
fs.getFileStatus(fPath);
FSDataInputStream in = fs.open(fPath);
byte[] toRead = new byte[files[idx].getSize()];
byte[] toCompare = new byte[files[idx].getSize()];
Random rb = new Random(files[idx].getSeed());
rb.nextBytes(toCompare);
assertEquals("Cannnot read file.", toRead.length, in.read(toRead));
in.close();
for (int i = 0; i < toRead.length; i++) {
if (toRead[i] != toCompare[i]) {
return false;
}
}
toRead = null;
toCompare = null;
}
catch(FileNotFoundException fnfe) {
if (!existingOnly) {
throw fnfe;
}
}
}
return true;
}
private static void updateFiles(FileSystem fs, String topdir, MyFile[] files,
int nupdate) throws IOException {
assert nupdate <= NFILES;
Path root = new Path(topdir);
for (int idx = 0; idx < nupdate; ++idx) {
Path fPath = new Path(root, files[idx].getName());
// overwrite file
assertTrue(fPath.toString() + " does not exist", fs.exists(fPath));
FSDataOutputStream out = fs.create(fPath);
files[idx].reset();
byte[] toWrite = new byte[files[idx].getSize()];
Random rb = new Random(files[idx].getSeed());
rb.nextBytes(toWrite);
out.write(toWrite);
out.close();
}
}
private static FileStatus[] getFileStatus(FileSystem fs,
String topdir, MyFile[] files) throws IOException {
return getFileStatus(fs, topdir, files, false);
}
private static FileStatus[] getFileStatus(FileSystem fs,
String topdir, MyFile[] files, boolean existingOnly) throws IOException {
Path root = new Path(topdir);
List<FileStatus> statuses = new ArrayList<FileStatus>();
for (int idx = 0; idx < NFILES; ++idx) {
try {
statuses.add(fs.getFileStatus(new Path(root, files[idx].getName())));
} catch(FileNotFoundException fnfe) {
if (!existingOnly) {
throw fnfe;
}
}
}
return statuses.toArray(new FileStatus[statuses.size()]);
}
private static boolean checkUpdate(FileSystem fs, FileStatus[] old,
String topdir, MyFile[] upd, final int nupdate) throws IOException {
Path root = new Path(topdir);
// overwrote updated files
for (int idx = 0; idx < nupdate; ++idx) {
final FileStatus stat =
fs.getFileStatus(new Path(root, upd[idx].getName()));
if (stat.getModificationTime() <= old[idx].getModificationTime()) {
return false;
}
}
// did not overwrite files not updated
for (int idx = nupdate; idx < NFILES; ++idx) {
final FileStatus stat =
fs.getFileStatus(new Path(root, upd[idx].getName()));
if (stat.getModificationTime() != old[idx].getModificationTime()) {
return false;
}
}
return true;
}
/** delete directory and everything underneath it.*/
private static void deldir(FileSystem fs, String topdir) throws IOException {
fs.delete(new Path(topdir), true);
}
/** copy files from local file system to local file system */
@SuppressWarnings("deprecation")
public void testCopyFromLocalToLocal() throws Exception {
Configuration conf = new Configuration();
FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR+"/srcdat");
ToolRunner.run(new DistCpV1(new Configuration()),
new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
"file:///"+TEST_ROOT_DIR+"/destdat"});
assertTrue("Source and destination directories do not match.",
checkFiles(localfs, TEST_ROOT_DIR+"/destdat", files));
deldir(localfs, TEST_ROOT_DIR+"/destdat");
deldir(localfs, TEST_ROOT_DIR+"/srcdat");
}
/** copy files from dfs file system to dfs file system */
@SuppressWarnings("deprecation")
public void testCopyFromDfsToDfs() throws Exception {
String namenode = null;
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final FileSystem hdfs = cluster.getFileSystem();
namenode = FileSystem.getDefaultUri(conf).toString();
if (namenode.startsWith("hdfs://")) {
MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
ToolRunner.run(new DistCpV1(conf), new String[] {
"-log",
namenode+"/logs",
namenode+"/srcdat",
namenode+"/destdat"});
assertTrue("Source and destination directories do not match.",
checkFiles(hdfs, "/destdat", files));
FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf);
assertTrue("Log directory does not exist.",
fs.exists(new Path(namenode+"/logs")));
deldir(hdfs, "/destdat");
deldir(hdfs, "/srcdat");
deldir(hdfs, "/logs");
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
/** copy empty directory on dfs file system */
@SuppressWarnings("deprecation")
public void testEmptyDir() throws Exception {
String namenode = null;
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final FileSystem hdfs = cluster.getFileSystem();
namenode = FileSystem.getDefaultUri(conf).toString();
if (namenode.startsWith("hdfs://")) {
FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
fs.mkdirs(new Path("/empty"));
ToolRunner.run(new DistCpV1(conf), new String[] {
"-log",
namenode+"/logs",
namenode+"/empty",
namenode+"/dest"});
fs = FileSystem.get(URI.create(namenode+"/destdat"), conf);
assertTrue("Destination directory does not exist.",
fs.exists(new Path(namenode+"/dest")));
deldir(hdfs, "/dest");
deldir(hdfs, "/empty");
deldir(hdfs, "/logs");
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
/** copy files from local file system to dfs file system */
@SuppressWarnings("deprecation")
public void testCopyFromLocalToDfs() throws Exception {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).build();
final FileSystem hdfs = cluster.getFileSystem();
final String namenode = hdfs.getUri().toString();
if (namenode.startsWith("hdfs://")) {
MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR+"/srcdat");
ToolRunner.run(new DistCpV1(conf), new String[] {
"-log",
namenode+"/logs",
"file:///"+TEST_ROOT_DIR+"/srcdat",
namenode+"/destdat"});
assertTrue("Source and destination directories do not match.",
checkFiles(cluster.getFileSystem(), "/destdat", files));
assertTrue("Log directory does not exist.",
hdfs.exists(new Path(namenode+"/logs")));
deldir(hdfs, "/destdat");
deldir(hdfs, "/logs");
deldir(FileSystem.get(LOCAL_FS, conf), TEST_ROOT_DIR+"/srcdat");
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
/** copy files from dfs file system to local file system */
@SuppressWarnings("deprecation")
public void testCopyFromDfsToLocal() throws Exception {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
final FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
cluster = new MiniDFSCluster.Builder(conf).build();
final FileSystem hdfs = cluster.getFileSystem();
final String namenode = FileSystem.getDefaultUri(conf).toString();
if (namenode.startsWith("hdfs://")) {
MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
ToolRunner.run(new DistCpV1(conf), new String[] {
"-log",
"/logs",
namenode+"/srcdat",
"file:///"+TEST_ROOT_DIR+"/destdat"});
assertTrue("Source and destination directories do not match.",
checkFiles(localfs, TEST_ROOT_DIR+"/destdat", files));
assertTrue("Log directory does not exist.",
hdfs.exists(new Path("/logs")));
deldir(localfs, TEST_ROOT_DIR+"/destdat");
deldir(hdfs, "/logs");
deldir(hdfs, "/srcdat");
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
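  /** test copying from dfs to dfs with the -update and -overwrite options */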
@SuppressWarnings("deprecation")
public void testCopyDfsToDfsUpdateOverwrite() throws Exception {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final FileSystem hdfs = cluster.getFileSystem();
final String namenode = hdfs.getUri().toString();
if (namenode.startsWith("hdfs://")) {
MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
ToolRunner.run(new DistCpV1(conf), new String[] {
"-p",
"-log",
namenode+"/logs",
namenode+"/srcdat",
namenode+"/destdat"});
assertTrue("Source and destination directories do not match.",
checkFiles(hdfs, "/destdat", files));
FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf);
assertTrue("Log directory does not exist.",
fs.exists(new Path(namenode+"/logs")));
FileStatus[] dchkpoint = getFileStatus(hdfs, "/destdat", files);
final int nupdate = NFILES>>2;
updateFiles(cluster.getFileSystem(), "/srcdat", files, nupdate);
deldir(hdfs, "/logs");
ToolRunner.run(new DistCpV1(conf), new String[] {
"-prbugp", // no t to avoid preserving mod. times
"-update",
"-log",
namenode+"/logs",
namenode+"/srcdat",
namenode+"/destdat"});
assertTrue("Source and destination directories do not match.",
checkFiles(hdfs, "/destdat", files));
assertTrue("Update failed to replicate all changes in src",
checkUpdate(hdfs, dchkpoint, "/destdat", files, nupdate));
deldir(hdfs, "/logs");
ToolRunner.run(new DistCpV1(conf), new String[] {
"-prbugp", // no t to avoid preserving mod. times
"-overwrite",
"-log",
namenode+"/logs",
namenode+"/srcdat",
namenode+"/destdat"});
assertTrue("Source and destination directories do not match.",
checkFiles(hdfs, "/destdat", files));
assertTrue("-overwrite didn't.",
checkUpdate(hdfs, dchkpoint, "/destdat", files, NFILES));
deldir(hdfs, "/destdat");
deldir(hdfs, "/srcdat");
deldir(hdfs, "/logs");
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
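  /** test -update with -skipcrccheck: a same-length file with different contents must not be overwritten */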
@SuppressWarnings("deprecation")
public void testCopyDfsToDfsUpdateWithSkipCRC() throws Exception {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final FileSystem hdfs = cluster.getFileSystem();
final String namenode = hdfs.getUri().toString();
FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
// Create two files of the same name, same length but different
// contents
final String testfilename = "test";
final String srcData = "act act act";
final String destData = "cat cat cat";
if (namenode.startsWith("hdfs://")) {
deldir(hdfs,"/logs");
Path srcPath = new Path("/srcdat", testfilename);
Path destPath = new Path("/destdat", testfilename);
FSDataOutputStream out = fs.create(srcPath, true);
out.writeUTF(srcData);
out.close();
out = fs.create(destPath, true);
out.writeUTF(destData);
out.close();
// Run with -skipcrccheck option
ToolRunner.run(new DistCpV1(conf), new String[] {
"-p",
"-update",
"-skipcrccheck",
"-log",
namenode+"/logs",
namenode+"/srcdat",
namenode+"/destdat"});
// File should not be overwritten
FSDataInputStream in = hdfs.open(destPath);
String s = in.readUTF();
System.out.println("Dest had: " + s);
assertTrue("Dest got over written even with skip crc",
s.equalsIgnoreCase(destData));
in.close();
deldir(hdfs, "/logs");
// Run without the option
ToolRunner.run(new DistCpV1(conf), new String[] {
"-p",
"-update",
"-log",
namenode+"/logs",
namenode+"/srcdat",
namenode+"/destdat"});
// File should be overwritten
in = hdfs.open(destPath);
s = in.readUTF();
System.out.println("Dest had: " + s);
assertTrue("Dest did not get overwritten without skip crc",
s.equalsIgnoreCase(srcData));
in.close();
deldir(hdfs, "/destdat");
deldir(hdfs, "/srcdat");
deldir(hdfs, "/logs");
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
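  /** test that duplicated source directories are rejected with a DuplicationException */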
@SuppressWarnings("deprecation")
public void testCopyDuplication() throws Exception {
final FileSystem localfs = FileSystem.get(LOCAL_FS, new Configuration());
try {
MyFile[] files = createFiles(localfs, TEST_ROOT_DIR+"/srcdat");
ToolRunner.run(new DistCpV1(new Configuration()),
new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
"file:///"+TEST_ROOT_DIR+"/src2/srcdat"});
assertTrue("Source and destination directories do not match.",
checkFiles(localfs, TEST_ROOT_DIR+"/src2/srcdat", files));
assertEquals(DistCpV1.DuplicationException.ERROR_CODE,
ToolRunner.run(new DistCpV1(new Configuration()),
new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
"file:///"+TEST_ROOT_DIR+"/src2/srcdat",
"file:///"+TEST_ROOT_DIR+"/destdat",}));
}
finally {
deldir(localfs, TEST_ROOT_DIR+"/destdat");
deldir(localfs, TEST_ROOT_DIR+"/srcdat");
deldir(localfs, TEST_ROOT_DIR+"/src2");
}
}
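  /** test copying a single file, with and without -update */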
@SuppressWarnings("deprecation")
public void testCopySingleFile() throws Exception {
FileSystem fs = FileSystem.get(LOCAL_FS, new Configuration());
Path root = new Path(TEST_ROOT_DIR+"/srcdat");
try {
MyFile[] files = {createFile(root, fs)};
//copy a dir with a single file
ToolRunner.run(new DistCpV1(new Configuration()),
new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
"file:///"+TEST_ROOT_DIR+"/destdat"});
assertTrue("Source and destination directories do not match.",
checkFiles(fs, TEST_ROOT_DIR+"/destdat", files));
//copy a single file
String fname = files[0].getName();
Path p = new Path(root, fname);
FileSystem.LOG.info("fname=" + fname + ", exists? " + fs.exists(p));
ToolRunner.run(new DistCpV1(new Configuration()),
new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat/"+fname,
"file:///"+TEST_ROOT_DIR+"/dest2/"+fname});
assertTrue("Source and destination directories do not match.",
checkFiles(fs, TEST_ROOT_DIR+"/dest2", files));
// single file update should skip copy if destination has the file already
String[] args = {"-update", "file:///"+TEST_ROOT_DIR+"/srcdat/"+fname,
"file:///"+TEST_ROOT_DIR+"/dest2/"+fname};
Configuration conf = new Configuration();
JobConf job = new JobConf(conf, DistCpV1.class);
DistCpV1.Arguments distcpArgs = DistCpV1.Arguments.valueOf(args, conf);
assertFalse("Single file update failed to skip copying even though the "
+ "file exists at destination.", DistCpV1.setup(conf, job, distcpArgs));
//copy single file to existing dir
deldir(fs, TEST_ROOT_DIR+"/dest2");
fs.mkdirs(new Path(TEST_ROOT_DIR+"/dest2"));
MyFile[] files2 = {createFile(root, fs, 0)};
String sname = files2[0].getName();
ToolRunner.run(new DistCpV1(new Configuration()),
new String[] {"-update",
"file:///"+TEST_ROOT_DIR+"/srcdat/"+sname,
"file:///"+TEST_ROOT_DIR+"/dest2/"});
assertTrue("Source and destination directories do not match.",
checkFiles(fs, TEST_ROOT_DIR+"/dest2", files2));
updateFiles(fs, TEST_ROOT_DIR+"/srcdat", files2, 1);
//copy single file to existing dir w/ dst name conflict
ToolRunner.run(new DistCpV1(new Configuration()),
new String[] {"-update",
"file:///"+TEST_ROOT_DIR+"/srcdat/"+sname,
"file:///"+TEST_ROOT_DIR+"/dest2/"});
assertTrue("Source and destination directories do not match.",
checkFiles(fs, TEST_ROOT_DIR+"/dest2", files2));
}
finally {
deldir(fs, TEST_ROOT_DIR+"/destdat");
deldir(fs, TEST_ROOT_DIR+"/dest2");
deldir(fs, TEST_ROOT_DIR+"/srcdat");
}
}
/** tests basedir option copying files from dfs file system to dfs file system */
@SuppressWarnings("deprecation")
public void testBasedir() throws Exception {
String namenode = null;
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final FileSystem hdfs = cluster.getFileSystem();
namenode = FileSystem.getDefaultUri(conf).toString();
if (namenode.startsWith("hdfs://")) {
MyFile[] files = createFiles(URI.create(namenode), "/basedir/middle/srcdat");
ToolRunner.run(new DistCpV1(conf), new String[] {
"-basedir",
"/basedir",
namenode+"/basedir/middle/srcdat",
namenode+"/destdat"});
assertTrue("Source and destination directories do not match.",
checkFiles(hdfs, "/destdat/middle/srcdat", files));
deldir(hdfs, "/destdat");
deldir(hdfs, "/basedir");
deldir(hdfs, "/logs");
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
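  /** test the -pu, -pg, -pp and -pt options for preserving owner, group, permission and times */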
@SuppressWarnings("deprecation")
public void testPreserveOption() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
String nnUri = FileSystem.getDefaultUri(conf).toString();
FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
{//test preserving user
MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files);
for(int i = 0; i < srcstat.length; i++) {
fs.setOwner(srcstat[i].getPath(), "u" + i, null);
}
ToolRunner.run(new DistCpV1(conf),
new String[]{"-pu", nnUri+"/srcdat", nnUri+"/destdat"});
assertTrue("Source and destination directories do not match.",
checkFiles(fs, "/destdat", files));
FileStatus[] dststat = getFileStatus(fs, "/destdat", files);
for(int i = 0; i < dststat.length; i++) {
assertEquals("i=" + i, "u" + i, dststat[i].getOwner());
}
deldir(fs, "/destdat");
deldir(fs, "/srcdat");
}
{//test preserving group
MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files);
for(int i = 0; i < srcstat.length; i++) {
fs.setOwner(srcstat[i].getPath(), null, "g" + i);
}
ToolRunner.run(new DistCpV1(conf),
new String[]{"-pg", nnUri+"/srcdat", nnUri+"/destdat"});
assertTrue("Source and destination directories do not match.",
checkFiles(fs, "/destdat", files));
FileStatus[] dststat = getFileStatus(fs, "/destdat", files);
for(int i = 0; i < dststat.length; i++) {
assertEquals("i=" + i, "g" + i, dststat[i].getGroup());
}
deldir(fs, "/destdat");
deldir(fs, "/srcdat");
}
{//test preserving mode
MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files);
FsPermission[] permissions = new FsPermission[srcstat.length];
for(int i = 0; i < srcstat.length; i++) {
permissions[i] = new FsPermission((short)(i & 0666));
fs.setPermission(srcstat[i].getPath(), permissions[i]);
}
ToolRunner.run(new DistCpV1(conf),
new String[]{"-pp", nnUri+"/srcdat", nnUri+"/destdat"});
assertTrue("Source and destination directories do not match.",
checkFiles(fs, "/destdat", files));
FileStatus[] dststat = getFileStatus(fs, "/destdat", files);
for(int i = 0; i < dststat.length; i++) {
assertEquals("i=" + i, permissions[i], dststat[i].getPermission());
}
deldir(fs, "/destdat");
deldir(fs, "/srcdat");
}
{//test preserving times
MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
fs.mkdirs(new Path("/srcdat/tmpf1"));
fs.mkdirs(new Path("/srcdat/tmpf2"));
FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files);
FsPermission[] permissions = new FsPermission[srcstat.length];
for(int i = 0; i < srcstat.length; i++) {
fs.setTimes(srcstat[i].getPath(), 40, 50);
}
ToolRunner.run(new DistCpV1(conf),
new String[]{"-pt", nnUri+"/srcdat", nnUri+"/destdat"});
FileStatus[] dststat = getFileStatus(fs, "/destdat", files);
for(int i = 0; i < dststat.length; i++) {
assertEquals("Modif. Time i=" + i, 40, dststat[i].getModificationTime());
assertEquals("Access Time i=" + i+ srcstat[i].getPath() + "-" + dststat[i].getPath(), 50, dststat[i].getAccessTime());
}
assertTrue("Source and destination directories do not match.",
checkFiles(fs, "/destdat", files));
deldir(fs, "/destdat");
deldir(fs, "/srcdat");
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
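  /** test that the -m option and distcp.bytes.per.map control the number of map tasks */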
@SuppressWarnings("deprecation")
public void testMapCount() throws Exception {
String namenode = null;
MiniDFSCluster dfs = null;
MiniDFSCluster mr = null;
try {
Configuration conf = new Configuration();
dfs= new MiniDFSCluster.Builder(conf).numDataNodes(3).format(true).build();
FileSystem fs = dfs.getFileSystem();
final FsShell shell = new FsShell(conf);
namenode = fs.getUri().toString();
MyFile[] files = createFiles(fs.getUri(), "/srcdat");
long totsize = 0;
for (MyFile f : files) {
totsize += f.getSize();
}
Configuration job = new JobConf(conf);
job.setLong("distcp.bytes.per.map", totsize / 3);
ToolRunner.run(new DistCpV1(job),
new String[] {"-m", "100",
"-log",
namenode+"/logs",
namenode+"/srcdat",
namenode+"/destdat"});
assertTrue("Source and destination directories do not match.",
checkFiles(fs, "/destdat", files));
String logdir = namenode + "/logs";
System.out.println(execCmd(shell, "-lsr", logdir));
FileStatus[] logs = fs.listStatus(new Path(logdir));
// rare case where splits are exact, logs.length can be 4
assertTrue( logs.length == 2);
deldir(fs, "/destdat");
deldir(fs, "/logs");
ToolRunner.run(new DistCpV1(job),
new String[] {"-m", "1",
"-log",
namenode+"/logs",
namenode+"/srcdat",
namenode+"/destdat"});
System.out.println(execCmd(shell, "-lsr", logdir));
logs = fs.globStatus(new Path(namenode+"/logs/part*"));
assertTrue("Unexpected map count, logs.length=" + logs.length,
logs.length == 1);
} finally {
if (dfs != null) { dfs.shutdown(); }
if (mr != null) { mr.shutdown(); }
}
}
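  /** test the -filelimit and -sizelimit options, alone and combined with -update */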
@SuppressWarnings("deprecation")
public void testLimits() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final String nnUri = FileSystem.getDefaultUri(conf).toString();
final FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
final DistCpV1 distcp = new DistCpV1(conf);
final FsShell shell = new FsShell(conf);
final String srcrootdir = "/src_root";
final Path srcrootpath = new Path(srcrootdir);
final String dstrootdir = "/dst_root";
final Path dstrootpath = new Path(dstrootdir);
{//test -filelimit
MyFile[] files = createFiles(URI.create(nnUri), srcrootdir);
int filelimit = files.length / 2;
System.out.println("filelimit=" + filelimit);
ToolRunner.run(distcp,
new String[]{"-filelimit", ""+filelimit, nnUri+srcrootdir, nnUri+dstrootdir});
String results = execCmd(shell, "-lsr", dstrootdir);
results = removePrefix(results, dstrootdir);
System.out.println("results=" + results);
FileStatus[] dststat = getFileStatus(fs, dstrootdir, files, true);
assertEquals(filelimit, dststat.length);
deldir(fs, dstrootdir);
deldir(fs, srcrootdir);
}
{//test -sizelimit
createFiles(URI.create(nnUri), srcrootdir);
long sizelimit = fs.getContentSummary(srcrootpath).getLength()/2;
System.out.println("sizelimit=" + sizelimit);
ToolRunner.run(distcp,
new String[]{"-sizelimit", ""+sizelimit, nnUri+srcrootdir, nnUri+dstrootdir});
ContentSummary summary = fs.getContentSummary(dstrootpath);
System.out.println("summary=" + summary);
assertTrue(summary.getLength() <= sizelimit);
deldir(fs, dstrootdir);
deldir(fs, srcrootdir);
}
{//test update
final MyFile[] srcs = createFiles(URI.create(nnUri), srcrootdir);
final long totalsize = fs.getContentSummary(srcrootpath).getLength();
System.out.println("src.length=" + srcs.length);
System.out.println("totalsize =" + totalsize);
fs.mkdirs(dstrootpath);
final int parts = RAN.nextInt(NFILES/3 - 1) + 2;
final int filelimit = srcs.length/parts;
final long sizelimit = totalsize/parts;
System.out.println("filelimit=" + filelimit);
System.out.println("sizelimit=" + sizelimit);
System.out.println("parts =" + parts);
final String[] args = {"-filelimit", ""+filelimit, "-sizelimit", ""+sizelimit,
"-update", nnUri+srcrootdir, nnUri+dstrootdir};
int dstfilecount = 0;
long dstsize = 0;
for(int i = 0; i <= parts; i++) {
ToolRunner.run(distcp, args);
FileStatus[] dststat = getFileStatus(fs, dstrootdir, srcs, true);
System.out.println(i + ") dststat.length=" + dststat.length);
assertTrue(dststat.length - dstfilecount <= filelimit);
ContentSummary summary = fs.getContentSummary(dstrootpath);
System.out.println(i + ") summary.getLength()=" + summary.getLength());
assertTrue(summary.getLength() - dstsize <= sizelimit);
assertTrue(checkFiles(fs, dstrootdir, srcs, true));
dstfilecount = dststat.length;
dstsize = summary.getLength();
}
deldir(fs, dstrootdir);
deldir(fs, srcrootdir);
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
static final long now = System.currentTimeMillis();
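  /** create a test user with a unique name, placed in the supergroup if issuper is true */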
static UserGroupInformation createUGI(String name, boolean issuper) {
String username = name + now;
String group = issuper? "supergroup": username;
return UserGroupInformation.createUserForTesting(username,
new String[]{group});
}
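  /** create /user/<name>, chown it to the given user and restrict it to mode 700 */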
static Path createHomeDirectory(FileSystem fs, UserGroupInformation ugi
) throws IOException {
final Path home = new Path("/user/" + ugi.getUserName());
fs.mkdirs(home);
fs.setOwner(home, ugi.getUserName(), ugi.getGroupNames()[0]);
fs.setPermission(home, new FsPermission((short)0700));
return home;
}
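  /** test that copying over hftp fails when the source directory permission is 000 */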
public void testHftpAccessControl() throws Exception {
MiniDFSCluster cluster = null;
try {
final UserGroupInformation DFS_UGI = createUGI("dfs", true);
final UserGroupInformation USER_UGI = createUGI("user", false);
//start cluster by DFS_UGI
final Configuration dfsConf = new Configuration();
cluster = new MiniDFSCluster.Builder(dfsConf).numDataNodes(2).build();
cluster.waitActive();
final String httpAdd = dfsConf.get("dfs.http.address");
final URI nnURI = FileSystem.getDefaultUri(dfsConf);
final String nnUri = nnURI.toString();
FileSystem fs1 = DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
public FileSystem run() throws IOException {
return FileSystem.get(nnURI, dfsConf);
}
});
final Path home =
createHomeDirectory(fs1, USER_UGI);
//now, login as USER_UGI
final Configuration userConf = new Configuration();
final FileSystem fs =
USER_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
public FileSystem run() throws IOException {
return FileSystem.get(nnURI, userConf);
}
});
final Path srcrootpath = new Path(home, "src_root");
final String srcrootdir = srcrootpath.toString();
final Path dstrootpath = new Path(home, "dst_root");
final String dstrootdir = dstrootpath.toString();
final DistCpV1 distcp = USER_UGI.doAs(new PrivilegedExceptionAction<DistCpV1>() {
public DistCpV1 run() {
return new DistCpV1(userConf);
}
});
FileSystem.mkdirs(fs, srcrootpath, new FsPermission((short)0700));
final String[] args = {"hftp://"+httpAdd+srcrootdir, nnUri+dstrootdir};
{ //copy with permission 000, should fail
fs.setPermission(srcrootpath, new FsPermission((short)0));
USER_UGI.doAs(new PrivilegedExceptionAction<Void>() {
public Void run() throws Exception {
assertEquals(-3, ToolRunner.run(distcp, args));
return null;
}
});
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
/** test -delete */
@SuppressWarnings("deprecation")
public void testDelete() throws Exception {
final Configuration conf = new Configuration();
conf.setInt("fs.trash.interval", 60);
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final URI nnURI = FileSystem.getDefaultUri(conf);
final String nnUri = nnURI.toString();
final FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
final DistCpV1 distcp = new DistCpV1(conf);
final FsShell shell = new FsShell(conf);
final String srcrootdir = "/src_root";
final String dstrootdir = "/dst_root";
{
//create source files
createFiles(nnURI, srcrootdir);
String srcresults = execCmd(shell, "-lsr", srcrootdir);
srcresults = removePrefix(srcresults, srcrootdir);
System.out.println("srcresults=" + srcresults);
//create some files in dst
createFiles(nnURI, dstrootdir);
System.out.println("dstrootdir=" + dstrootdir);
shell.run(new String[]{"-lsr", dstrootdir});
//run distcp
ToolRunner.run(distcp,
new String[]{"-delete", "-update", "-log", "/log",
nnUri+srcrootdir, nnUri+dstrootdir});
//make sure src and dst contains the same files
String dstresults = execCmd(shell, "-lsr", dstrootdir);
dstresults = removePrefix(dstresults, dstrootdir);
System.out.println("first dstresults=" + dstresults);
assertEquals(srcresults, dstresults);
//create additional file in dst
create(fs, new Path(dstrootdir, "foo"));
create(fs, new Path(dstrootdir, "foobar"));
//run distcp again
ToolRunner.run(distcp,
new String[]{"-delete", "-update", "-log", "/log2",
nnUri+srcrootdir, nnUri+dstrootdir});
//make sure src and dst contains the same files
dstresults = execCmd(shell, "-lsr", dstrootdir);
dstresults = removePrefix(dstresults, dstrootdir);
System.out.println("second dstresults=" + dstresults);
assertEquals(srcresults, dstresults);
// verify that files removed in -delete were moved to the trash
// regrettably, this test will break if Trash changes incompatibly
assertTrue(fs.exists(new Path(fs.getHomeDirectory(),
".Trash/Current" + dstrootdir + "/foo")));
assertTrue(fs.exists(new Path(fs.getHomeDirectory(),
".Trash/Current" + dstrootdir + "/foobar")));
//cleanup
deldir(fs, dstrootdir);
deldir(fs, srcrootdir);
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
  /**
   * verify that -delete option works for other {@link FileSystem}
   * implementations. See MAPREDUCE-1285
   */
@SuppressWarnings("deprecation")
public void testDeleteLocal() throws Exception {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
final FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
cluster = new MiniDFSCluster.Builder(conf).build();
final FileSystem hdfs = cluster.getFileSystem();
final String namenode = FileSystem.getDefaultUri(conf).toString();
if (namenode.startsWith("hdfs://")) {
MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
String destdir = TEST_ROOT_DIR + "/destdat";
MyFile[] localFiles = createFiles(localfs, destdir);
ToolRunner.run(new DistCpV1(conf), new String[] {
"-delete",
"-update",
"-log",
"/logs",
namenode+"/srcdat",
"file:///"+TEST_ROOT_DIR+"/destdat"});
assertTrue("Source and destination directories do not match.",
checkFiles(localfs, destdir, files));
assertTrue("Log directory does not exist.",
hdfs.exists(new Path("/logs")));
deldir(localfs, destdir);
deldir(hdfs, "/logs");
deldir(hdfs, "/srcdat");
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
/** test globbing */
@SuppressWarnings("deprecation")
public void testGlobbing() throws Exception {
String namenode = null;
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final FileSystem hdfs = cluster.getFileSystem();
namenode = FileSystem.getDefaultUri(conf).toString();
if (namenode.startsWith("hdfs://")) {
MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
ToolRunner.run(new DistCpV1(conf), new String[] {
"-log",
namenode+"/logs",
namenode+"/srcdat/*",
namenode+"/destdat"});
assertTrue("Source and destination directories do not match.",
checkFiles(hdfs, "/destdat", files));
FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf);
assertTrue("Log directory does not exist.",
fs.exists(new Path(namenode+"/logs")));
deldir(hdfs, "/destdat");
deldir(hdfs, "/srcdat");
deldir(hdfs, "/logs");
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
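  /** create a file filled with 1-2KB of random bytes */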
static void create(FileSystem fs, Path f) throws IOException {
FSDataOutputStream out = fs.create(f);
try {
byte[] b = new byte[1024 + RAN.nextInt(1024)];
RAN.nextBytes(b);
out.write(b);
} finally {
if (out != null) out.close();
}
}
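  /** run an FsShell command and return its captured standard output */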
static String execCmd(FsShell shell, String... args) throws Exception {
ByteArrayOutputStream baout = new ByteArrayOutputStream();
PrintStream out = new PrintStream(baout, true);
PrintStream old = System.out;
System.setOut(out);
shell.run(args);
out.close();
System.setOut(old);
return baout.toString();
}
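  /** strip everything up to and including the given prefix from each line of the output */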
private static String removePrefix(String lines, String prefix) {
final int prefixlen = prefix.length();
final StringTokenizer t = new StringTokenizer(lines, "\n");
final StringBuffer results = new StringBuffer();
for(; t.hasMoreTokens(); ) {
String s = t.nextToken();
results.append(s.substring(s.indexOf(prefix) + prefixlen) + "\n");
}
return results.toString();
}
}
| 44,941 | 38.56162 | 128 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestLogalyzer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.EnumSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.junit.Assert;
import org.junit.Test;
public class TestLogalyzer {
private static String EL = System.getProperty("line.separator");
private static String TAB = "\t";
private static final Log LOG = LogFactory.getLog(TestLogalyzer.class);
private static File workSpace = new File("target",
TestLogalyzer.class.getName() + "-workSpace");
private static File outdir = new File(workSpace.getAbsoluteFile()
+ File.separator + "out");
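  /** run Logalyzer over the generated logs: grep for "44", sort on column 0 and write the analysis to outdir */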
@Test
@SuppressWarnings("deprecation")
public void testLogalyzer() throws Exception {
Path f = createLogFile();
String[] args = new String[10];
args[0] = "-archiveDir";
args[1] = f.toString();
args[2] = "-grep";
args[3] = "44";
args[4] = "-sort";
args[5] = "0";
args[6] = "-analysis";
args[7] = outdir.getAbsolutePath();
args[8] = "-separator";
args[9] = " ";
Logalyzer.main(args);
checkResult();
}
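  /** verify the sorted grep counts written to part-00000 by Logalyzer */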
private void checkResult() throws Exception {
File result = new File(outdir.getAbsolutePath() + File.separator
+ "part-00000");
File success = new File(outdir.getAbsolutePath() + File.separator
+ "_SUCCESS");
Assert.assertTrue(success.exists());
FileInputStream fis = new FileInputStream(result);
BufferedReader br = new BufferedReader(new InputStreamReader(fis, "UTF-8"));
String line = br.readLine();
Assert.assertTrue(("1 44" + TAB + "2").equals(line));
line = br.readLine();
Assert.assertTrue(("3 44" + TAB + "1").equals(line));
line = br.readLine();
Assert.assertTrue(("4 44" + TAB + "1").equals(line));
br.close();
}
  /**
   * Create simple log files for Logalyzer to analyze.
   *
   * @return the path of the directory containing the generated log files
   * @throws IOException
   */
private Path createLogFile() throws IOException {
FileContext files = FileContext.getLocalFSFileContext();
Path ws = new Path(workSpace.getAbsoluteFile().getAbsolutePath());
files.delete(ws, true);
Path workSpacePath = new Path(workSpace.getAbsolutePath(), "log");
files.mkdir(workSpacePath, null, true);
LOG.info("create logfile.log");
Path logfile1 = new Path(workSpacePath, "logfile.log");
FSDataOutputStream os = files.create(logfile1,
EnumSet.of(CreateFlag.CREATE));
os.writeBytes("4 3" + EL + "1 3" + EL + "4 44" + EL);
os.writeBytes("2 3" + EL + "1 3" + EL + "0 45" + EL);
os.writeBytes("4 3" + EL + "1 3" + EL + "1 44" + EL);
os.flush();
os.close();
LOG.info("create logfile1.log");
Path logfile2 = new Path(workSpacePath, "logfile1.log");
os = files.create(logfile2, EnumSet.of(CreateFlag.CREATE));
os.writeBytes("4 3" + EL + "1 3" + EL + "3 44" + EL);
os.writeBytes("2 3" + EL + "1 3" + EL + "0 45" + EL);
os.writeBytes("4 3" + EL + "1 3" + EL + "1 44" + EL);
os.flush();
os.close();
return workSpacePath;
}
}
| 4,105 | 29.641791 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestDistCh.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Arrays;
import java.util.Random;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.log4j.Level;
public class TestDistCh extends junit.framework.TestCase {
{
((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.StateChange")
).getLogger().setLevel(Level.ERROR);
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ERROR);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ERROR);
}
static final Long RANDOM_NUMBER_GENERATOR_SEED = null;
static final FsPermission UMASK = FsPermission.createImmutable((short)0111);
private static final Random RANDOM = new Random();
static {
final long seed = RANDOM_NUMBER_GENERATOR_SEED == null?
RANDOM.nextLong(): RANDOM_NUMBER_GENERATOR_SEED;
System.out.println("seed=" + seed);
RANDOM.setSeed(seed);
}
static final String TEST_ROOT_DIR =
new Path(System.getProperty("test.build.data","/tmp")
).toString().replace(' ', '+');
static final int NUN_SUBS = 7;
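  /** A small directory tree of files and subdirectories used as the chown/chgrp/chmod target. */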
static class FileTree {
private final FileSystem fs;
private final String root;
private final Path rootdir;
private int fcount = 0;
Path createSmallFile(Path dir) throws IOException {
final Path f = new Path(dir, "f" + ++fcount);
assertTrue(!fs.exists(f));
final DataOutputStream out = fs.create(f);
try {
out.writeBytes("createSmallFile: f=" + f);
} finally {
out.close();
}
assertTrue(fs.exists(f));
return f;
}
Path mkdir(Path dir) throws IOException {
assertTrue(fs.mkdirs(dir));
assertTrue(fs.getFileStatus(dir).isDirectory());
return dir;
}
FileTree(FileSystem fs, String name) throws IOException {
this.fs = fs;
this.root = "/test/" + name;
this.rootdir = mkdir(new Path(root));
for(int i = 0; i < 3; i++) {
createSmallFile(rootdir);
}
for(int i = 0; i < NUN_SUBS; i++) {
final Path sub = mkdir(new Path(root, "sub" + i));
int num_files = RANDOM.nextInt(3);
for(int j = 0; j < num_files; j++) {
createSmallFile(sub);
}
}
System.out.println("rootdir = " + rootdir);
}
}
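  /** Expected owner, group and permission after running DistCh; empty fields keep the original values. */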
static class ChPermissionStatus extends PermissionStatus {
private final boolean defaultPerm;
ChPermissionStatus(FileStatus filestatus) {
this(filestatus, "", "", "");
}
ChPermissionStatus(FileStatus filestatus, String owner, String group, String permission) {
super("".equals(owner)? filestatus.getOwner(): owner,
"".equals(group)? filestatus.getGroup(): group,
"".equals(permission)? filestatus.getPermission(): new FsPermission(Short.parseShort(permission, 8)));
defaultPerm = permission == null || "".equals(permission);
}
}
public void testDistCh() throws Exception {
final Configuration conf = new Configuration();
conf.set(CapacitySchedulerConfiguration.PREFIX+CapacitySchedulerConfiguration.ROOT+"."+CapacitySchedulerConfiguration.QUEUES, "default");
conf.set(CapacitySchedulerConfiguration.PREFIX+CapacitySchedulerConfiguration.ROOT+".default."+CapacitySchedulerConfiguration.CAPACITY, "100");
final MiniDFSCluster cluster= new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
final FileSystem fs = cluster.getFileSystem();
final FsShell shell = new FsShell(conf);
try {
final FileTree tree = new FileTree(fs, "testDistCh");
final FileStatus rootstatus = fs.getFileStatus(tree.rootdir);
runLsr(shell, tree.root, 0);
final String[] args = new String[NUN_SUBS];
final ChPermissionStatus[] newstatus = new ChPermissionStatus[NUN_SUBS];
args[0]="/test/testDistCh/sub0:sub1::";
newstatus[0] = new ChPermissionStatus(rootstatus, "sub1", "", "");
args[1]="/test/testDistCh/sub1::sub2:";
newstatus[1] = new ChPermissionStatus(rootstatus, "", "sub2", "");
args[2]="/test/testDistCh/sub2:::437";
newstatus[2] = new ChPermissionStatus(rootstatus, "", "", "437");
args[3]="/test/testDistCh/sub3:sub1:sub2:447";
newstatus[3] = new ChPermissionStatus(rootstatus, "sub1", "sub2", "447");
args[4]="/test/testDistCh/sub4::sub5:437";
newstatus[4] = new ChPermissionStatus(rootstatus, "", "sub5", "437");
args[5]="/test/testDistCh/sub5:sub1:sub5:";
newstatus[5] = new ChPermissionStatus(rootstatus, "sub1", "sub5", "");
args[6]="/test/testDistCh/sub6:sub3::437";
newstatus[6] = new ChPermissionStatus(rootstatus, "sub3", "", "437");
System.out.println("args=" + Arrays.asList(args).toString().replace(",", ",\n "));
System.out.println("newstatus=" + Arrays.asList(newstatus).toString().replace(",", ",\n "));
//run DistCh
new DistCh(MiniMRClientClusterFactory.create(this.getClass(), 2, conf).getConfig()).run(args);
runLsr(shell, tree.root, 0);
//check results
for(int i = 0; i < NUN_SUBS; i++) {
Path sub = new Path(tree.root + "/sub" + i);
checkFileStatus(newstatus[i], fs.getFileStatus(sub));
for(FileStatus status : fs.listStatus(sub)) {
checkFileStatus(newstatus[i], status);
}
}
} finally {
cluster.shutdown();
}
}
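  /** check that the actual owner, group and permission match the expected status */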
static void checkFileStatus(ChPermissionStatus expected, FileStatus actual) {
assertEquals(expected.getUserName(), actual.getOwner());
assertEquals(expected.getGroupName(), actual.getGroup());
FsPermission perm = expected.getPermission();
if (actual.isFile() && expected.defaultPerm) {
perm = perm.applyUMask(UMASK);
}
assertEquals(perm, actual.getPermission());
}
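  /** run "-lsr" with FsShell on the given root, asserting the return value, and return the captured output */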
private static String runLsr(final FsShell shell, String root, int returnvalue
) throws Exception {
System.out.println("root=" + root + ", returnvalue=" + returnvalue);
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final PrintStream out = new PrintStream(bytes);
final PrintStream oldOut = System.out;
final PrintStream oldErr = System.err;
System.setOut(out);
System.setErr(out);
final String results;
try {
assertEquals(returnvalue, shell.run(new String[]{"-lsr", root}));
results = bytes.toString();
} finally {
IOUtils.closeStream(out);
System.setOut(oldOut);
System.setErr(oldErr);
}
System.out.println("results:\n" + results);
return results;
}
}
| 8,209 | 35.816143 | 147 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/mapred/tools/TestGetGroups.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.tools;
import java.io.IOException;
import java.io.PrintStream;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.tools.GetGroups;
import org.apache.hadoop.tools.GetGroupsTestBase;
import org.apache.hadoop.util.Tool;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
/**
* Tests for the MR implementation of {@link GetGroups}
*/
@Ignore
public class TestGetGroups extends GetGroupsTestBase {
private MiniMRCluster cluster;
@Before
public void setUpJobTracker() throws IOException, InterruptedException {
cluster = new MiniMRCluster(0, "file:///", 1);
conf = cluster.createJobConf();
}
@After
public void tearDownJobTracker() throws IOException {
cluster.shutdown();
}
@Override
protected Tool getTool(PrintStream o) {
return new GetGroups(conf, o);
}
}
| 1,695 | 29.285714 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Command-line tools for MapReduce.
*/
package org.apache.hadoop.tools;
| 885 | 37.521739 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCh.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Stack;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.InvalidInputException;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SequenceFileRecordReader;
import org.apache.hadoop.mapreduce.JobSubmissionFiles;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
/**
 * A Map-reduce program to recursively change file properties
* such as owner, group and permission.
*/
public class DistCh extends DistTool {
static final String NAME = "distch";
static final String JOB_DIR_LABEL = NAME + ".job.dir";
static final String OP_LIST_LABEL = NAME + ".op.list";
static final String OP_COUNT_LABEL = NAME + ".op.count";
static final String USAGE = "java " + DistCh.class.getName()
+ " [OPTIONS] <path:owner:group:permission>+ "
+ "\n\nThe values of owner, group and permission can be empty."
+ "\nPermission is a octal number."
+ "\n\nOPTIONS:"
+ "\n-f <urilist_uri> Use list at <urilist_uri> as src list"
+ "\n-i Ignore failures"
+ "\n-log <logdir> Write logs to <logdir>"
;
private static final long OP_PER_MAP = 1000;
private static final int MAX_MAPS_PER_NODE = 20;
private static final int SYNC_FILE_MAX = 10;
static enum Counter { SUCCEED, FAIL }
static enum Option {
IGNORE_FAILURES("-i", NAME + ".ignore.failures");
final String cmd, propertyname;
private Option(String cmd, String propertyname) {
this.cmd = cmd;
this.propertyname = propertyname;
}
}
DistCh(Configuration conf) {
super(createJobConf(conf));
}
private static JobConf createJobConf(Configuration conf) {
JobConf jobconf = new JobConf(conf, DistCh.class);
jobconf.setJobName(NAME);
jobconf.setMapSpeculativeExecution(false);
jobconf.setInputFormat(ChangeInputFormat.class);
jobconf.setOutputKeyClass(Text.class);
jobconf.setOutputValueClass(Text.class);
jobconf.setMapperClass(ChangeFilesMapper.class);
jobconf.setNumReduceTasks(0);
return jobconf;
}
/** File operations. */
static class FileOperation implements Writable {
private Path src;
private String owner;
private String group;
private FsPermission permission;
FileOperation() {}
FileOperation(Path src, FileOperation that) {
this.src = src;
this.owner = that.owner;
this.group = that.group;
this.permission = that.permission;
checkState();
}
/**
* path:owner:group:permission
* e.g.
* /user/foo:foo:bar:700
*/
FileOperation(String line) {
try {
String[] t = line.split(":", 4);
for(int i = 0; i < t.length; i++) {
if ("".equals(t[i])) {
t[i] = null;
}
}
src = new Path(t[0]);
owner = t[1];
group = t[2];
permission = t[3] == null? null:
new FsPermission(Short.parseShort(t[3], 8));
checkState();
}
catch(Exception e) {
throw (IllegalArgumentException)new IllegalArgumentException(
"line=" + line).initCause(e);
}
}
private void checkState() throws IllegalStateException {
if (owner == null && group == null && permission == null) {
throw new IllegalStateException(
"owner == null && group == null && permission == null");
}
}
static final FsPermission FILE_UMASK
= FsPermission.createImmutable((short)0111);
private boolean isDifferent(FileStatus original) {
if (owner != null && !owner.equals(original.getOwner())) {
return true;
}
if (group != null && !group.equals(original.getGroup())) {
return true;
}
if (permission != null) {
FsPermission orig = original.getPermission();
return original.isDirectory()? !permission.equals(orig):
!permission.applyUMask(FILE_UMASK).equals(orig);
}
return false;
}
void run(Configuration conf) throws IOException {
FileSystem fs = src.getFileSystem(conf);
if (permission != null) {
fs.setPermission(src, permission);
}
if (owner != null || group != null) {
fs.setOwner(src, owner, group);
}
}
/** {@inheritDoc} */
public void readFields(DataInput in) throws IOException {
this.src = new Path(Text.readString(in));
owner = DistTool.readString(in);
group = DistTool.readString(in);
permission = in.readBoolean()? FsPermission.read(in): null;
}
/** {@inheritDoc} */
public void write(DataOutput out) throws IOException {
Text.writeString(out, src.toString());
DistTool.writeString(out, owner);
DistTool.writeString(out, group);
boolean b = permission != null;
out.writeBoolean(b);
if (b) {permission.write(out);}
}
/** {@inheritDoc} */
public String toString() {
return src + ":" + owner + ":" + group + ":" + permission;
}
}
/** Responsible for generating splits of the src file list. */
static class ChangeInputFormat implements InputFormat<Text, FileOperation> {
/** Do nothing. */
public void validateInput(JobConf job) {}
/**
* Produce splits such that each is no greater than the quotient of the
* total size and the number of splits requested.
* @param job The handle to the JobConf object
* @param numSplits Number of splits requested
*/
public InputSplit[] getSplits(JobConf job, int numSplits
) throws IOException {
final int srcCount = job.getInt(OP_COUNT_LABEL, -1);
final int targetcount = srcCount / numSplits;
String srclist = job.get(OP_LIST_LABEL, "");
if (srcCount < 0 || "".equals(srclist)) {
throw new RuntimeException("Invalid metadata: #files(" + srcCount +
") listuri(" + srclist + ")");
}
Path srcs = new Path(srclist);
FileSystem fs = srcs.getFileSystem(job);
List<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
Text key = new Text();
FileOperation value = new FileOperation();
long prev = 0L;
int count = 0; //count src
try (SequenceFile.Reader in = new SequenceFile.Reader(fs, srcs, job)) {
for ( ; in.next(key, value); ) {
long curr = in.getPosition();
long delta = curr - prev;
if (++count > targetcount) {
count = 0;
splits.add(new FileSplit(srcs, prev, delta, (String[])null));
prev = curr;
}
}
}
long remaining = fs.getFileStatus(srcs).getLen() - prev;
if (remaining != 0) {
splits.add(new FileSplit(srcs, prev, remaining, (String[])null));
}
LOG.info("numSplits=" + numSplits + ", splits.size()=" + splits.size());
return splits.toArray(new FileSplit[splits.size()]);
}
/** {@inheritDoc} */
public RecordReader<Text, FileOperation> getRecordReader(InputSplit split,
JobConf job, Reporter reporter) throws IOException {
return new SequenceFileRecordReader<Text, FileOperation>(job,
(FileSplit)split);
}
}
/** The mapper for changing files. */
static class ChangeFilesMapper
implements Mapper<Text, FileOperation, WritableComparable<?>, Text> {
private JobConf jobconf;
private boolean ignoreFailures;
private int failcount = 0;
private int succeedcount = 0;
private String getCountString() {
return "Succeeded: " + succeedcount + " Failed: " + failcount;
}
/** {@inheritDoc} */
public void configure(JobConf job) {
this.jobconf = job;
ignoreFailures=job.getBoolean(Option.IGNORE_FAILURES.propertyname,false);
}
/** Run a FileOperation */
public void map(Text key, FileOperation value,
OutputCollector<WritableComparable<?>, Text> out, Reporter reporter
) throws IOException {
try {
value.run(jobconf);
++succeedcount;
reporter.incrCounter(Counter.SUCCEED, 1);
} catch (IOException e) {
++failcount;
reporter.incrCounter(Counter.FAIL, 1);
String s = "FAIL: " + value + ", " + StringUtils.stringifyException(e);
out.collect(null, new Text(s));
LOG.info(s);
} finally {
reporter.setStatus(getCountString());
}
}
/** {@inheritDoc} */
public void close() throws IOException {
if (failcount == 0 || ignoreFailures) {
return;
}
throw new IOException(getCountString());
}
}
private static void check(Configuration conf, List<FileOperation> ops
) throws InvalidInputException {
List<Path> srcs = new ArrayList<Path>();
for(FileOperation op : ops) {
srcs.add(op.src);
}
DistTool.checkSource(conf, srcs);
}
private static List<FileOperation> fetchList(Configuration conf, Path inputfile
) throws IOException {
List<FileOperation> result = new ArrayList<FileOperation>();
for(String line : readFile(conf, inputfile)) {
result.add(new FileOperation(line));
}
return result;
}
  /** This is the main driver for recursively changing file properties. */
public int run(String[] args) throws Exception {
List<FileOperation> ops = new ArrayList<FileOperation>();
Path logpath = null;
boolean isIgnoreFailures = false;
try {
for (int idx = 0; idx < args.length; idx++) {
if ("-f".equals(args[idx])) {
if (++idx == args.length) {
System.out.println("urilist_uri not specified");
System.out.println(USAGE);
return -1;
}
ops.addAll(fetchList(jobconf, new Path(args[idx])));
} else if (Option.IGNORE_FAILURES.cmd.equals(args[idx])) {
isIgnoreFailures = true;
} else if ("-log".equals(args[idx])) {
if (++idx == args.length) {
System.out.println("logdir not specified");
System.out.println(USAGE);
return -1;
}
logpath = new Path(args[idx]);
} else if ('-' == args[idx].codePointAt(0)) {
System.out.println("Invalid switch " + args[idx]);
System.out.println(USAGE);
ToolRunner.printGenericCommandUsage(System.out);
return -1;
} else {
ops.add(new FileOperation(args[idx]));
}
}
// mandatory command-line parameters
if (ops.isEmpty()) {
throw new IllegalStateException("Operation is empty");
}
LOG.info("ops=" + ops);
LOG.info("isIgnoreFailures=" + isIgnoreFailures);
jobconf.setBoolean(Option.IGNORE_FAILURES.propertyname, isIgnoreFailures);
check(jobconf, ops);
try {
if (setup(ops, logpath)) {
JobClient.runJob(jobconf);
}
} finally {
try {
if (logpath == null) {
//delete log directory
final Path logdir = FileOutputFormat.getOutputPath(jobconf);
if (logdir != null) {
logdir.getFileSystem(jobconf).delete(logdir, true);
}
}
}
finally {
//delete job directory
final String jobdir = jobconf.get(JOB_DIR_LABEL);
if (jobdir != null) {
final Path jobpath = new Path(jobdir);
jobpath.getFileSystem(jobconf).delete(jobpath, true);
}
}
}
} catch(DuplicationException e) {
LOG.error("Input error:", e);
return DuplicationException.ERROR_CODE;
} catch(Exception e) {
LOG.error(NAME + " failed: ", e);
System.out.println(USAGE);
ToolRunner.printGenericCommandUsage(System.out);
return -1;
}
return 0;
}
/** Calculate how many maps to run. */
private static int getMapCount(int srcCount, int numNodes) {
int numMaps = (int)(srcCount / OP_PER_MAP);
numMaps = Math.min(numMaps, numNodes * MAX_MAPS_PER_NODE);
return Math.max(numMaps, 1);
}
private boolean setup(List<FileOperation> ops, Path log)
throws IOException {
final String randomId = getRandomId();
JobClient jClient = new JobClient(jobconf);
Path stagingArea;
try {
stagingArea = JobSubmissionFiles.getStagingDir(
jClient.getClusterHandle(), jobconf);
} catch (InterruptedException ie){
throw new IOException(ie);
}
Path jobdir = new Path(stagingArea + NAME + "_" + randomId);
FsPermission mapredSysPerms =
new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
FileSystem.mkdirs(jClient.getFs(), jobdir, mapredSysPerms);
LOG.info(JOB_DIR_LABEL + "=" + jobdir);
if (log == null) {
log = new Path(jobdir, "_logs");
}
FileOutputFormat.setOutputPath(jobconf, log);
LOG.info("log=" + log);
//create operation list
FileSystem fs = jobdir.getFileSystem(jobconf);
Path opList = new Path(jobdir, "_" + OP_LIST_LABEL);
jobconf.set(OP_LIST_LABEL, opList.toString());
int opCount = 0, synCount = 0;
try (SequenceFile.Writer opWriter = SequenceFile.createWriter(fs, jobconf, opList, Text.class,
FileOperation.class, SequenceFile.CompressionType.NONE)) {
for(FileOperation op : ops) {
FileStatus srcstat = fs.getFileStatus(op.src);
if (srcstat.isDirectory() && op.isDifferent(srcstat)) {
++opCount;
opWriter.append(new Text(op.src.toString()), op);
}
Stack<Path> pathstack = new Stack<Path>();
for(pathstack.push(op.src); !pathstack.empty(); ) {
for(FileStatus stat : fs.listStatus(pathstack.pop())) {
if (stat.isDirectory()) {
pathstack.push(stat.getPath());
}
if (op.isDifferent(stat)) {
++opCount;
if (++synCount > SYNC_FILE_MAX) {
opWriter.sync();
synCount = 0;
}
Path f = stat.getPath();
opWriter.append(new Text(f.toString()), new FileOperation(f, op));
}
}
}
}
}
checkDuplication(fs, opList, new Path(jobdir, "_sorted"), jobconf);
jobconf.setInt(OP_COUNT_LABEL, opCount);
LOG.info(OP_COUNT_LABEL + "=" + opCount);
jobconf.setNumMapTasks(getMapCount(opCount,
new JobClient(jobconf).getClusterStatus().getTaskTrackers()));
return opCount != 0;
}
private static void checkDuplication(FileSystem fs, Path file, Path sorted,
Configuration conf) throws IOException {
SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs,
new Text.Comparator(), Text.class, FileOperation.class, conf);
sorter.sort(file, sorted);
try (SequenceFile.Reader in = new SequenceFile.Reader(fs, sorted, conf)) {
FileOperation curop = new FileOperation();
Text prevsrc = null, cursrc = new Text();
for(; in.next(cursrc, curop); ) {
if (prevsrc != null && cursrc.equals(prevsrc)) {
throw new DuplicationException(
"Invalid input, there are duplicated files in the sources: "
+ prevsrc + ", " + cursrc);
}
prevsrc = cursrc;
cursrc = new Text();
curop = new FileOperation();
}
}
}
public static void main(String[] args) throws Exception {
System.exit(ToolRunner.run(new DistCh(new Configuration()), args));
}
}
| 17,158 | 32.645098 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistTool.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.io.BufferedReader;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.InvalidInputException;
import org.apache.hadoop.mapred.JobConf;
/**
 * An abstract base class for distributed tools that perform file-related operations.
*/
abstract class DistTool implements org.apache.hadoop.util.Tool {
protected static final Log LOG = LogFactory.getLog(DistTool.class);
protected JobConf jobconf;
/** {@inheritDoc} */
public void setConf(Configuration conf) {
if (jobconf != conf) {
jobconf = conf instanceof JobConf? (JobConf)conf: new JobConf(conf);
}
}
/** {@inheritDoc} */
public JobConf getConf() {return jobconf;}
protected DistTool(Configuration conf) {setConf(conf);}
private static final Random RANDOM = new Random();
protected static String getRandomId() {
return Integer.toString(RANDOM.nextInt(Integer.MAX_VALUE), 36);
}
/** Sanity check for source */
protected static void checkSource(Configuration conf, List<Path> srcs
) throws InvalidInputException {
List<IOException> ioes = new ArrayList<IOException>();
for(Path p : srcs) {
try {
if (!p.getFileSystem(conf).exists(p)) {
ioes.add(new FileNotFoundException("Source "+p+" does not exist."));
}
}
catch(IOException e) {ioes.add(e);}
}
if (!ioes.isEmpty()) {
throw new InvalidInputException(ioes);
}
}
protected static String readString(DataInput in) throws IOException {
if (in.readBoolean()) {
return Text.readString(in);
}
return null;
}
protected static void writeString(DataOutput out, String s
) throws IOException {
boolean b = s != null;
out.writeBoolean(b);
if (b) {Text.writeString(out, s);}
}
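  // Illustrative note (not part of the original source): readString/writeString encode
  // nullability with a leading boolean. writeString(out, null) writes a single 'false',
  // while writeString(out, "abc") writes 'true' followed by the Text-encoded string;
  // readString() reverses exactly that framing.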
protected static List<String> readFile(Configuration conf, Path inputfile
) throws IOException {
List<String> result = new ArrayList<String>();
FileSystem fs = inputfile.getFileSystem(conf);
try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.open(inputfile),
Charset.forName("UTF-8")))) {
for(String line; (line = input.readLine()) != null;) {
result.add(line);
}
}
return result;
}
/** An exception class for duplicated source files. */
public static class DuplicationException extends IOException {
private static final long serialVersionUID = 1L;
/** Error code for this exception */
public static final int ERROR_CODE = -2;
DuplicationException(String message) {super(message);}
}
}
| 3,847 | 32.172414 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.io.BufferedReader;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import java.util.Stack;
import java.util.StringTokenizer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.Reader;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.SequenceFile.Writer;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.InvalidInputException;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SequenceFileRecordReader;
import org.apache.hadoop.mapreduce.JobSubmissionFiles;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
/**
* A Map-reduce program to recursively copy directories between
* different file-systems.
*/
@Deprecated
public class DistCpV1 implements Tool {
public static final Log LOG = LogFactory.getLog(DistCpV1.class);
private static final String NAME = "distcp";
private static final String usage = NAME
+ " [OPTIONS] <srcurl>* <desturl>" +
"\n\nOPTIONS:" +
"\n-p[rbugpt] Preserve status" +
"\n r: replication number" +
"\n b: block size" +
"\n u: user" +
"\n g: group" +
"\n p: permission" +
"\n t: modification and access times" +
"\n -p alone is equivalent to -prbugpt" +
"\n-i Ignore failures" +
"\n-basedir <basedir> Use <basedir> as the base directory when copying files from <srcurl>" +
"\n-log <logdir> Write logs to <logdir>" +
"\n-m <num_maps> Maximum number of simultaneous copies" +
"\n-overwrite Overwrite destination" +
"\n-update Overwrite if src size different from dst size" +
"\n-skipcrccheck Do not use CRC check to determine if src is " +
"\n different from dest. Relevant only if -update" +
"\n is specified" +
"\n-f <urilist_uri> Use list at <urilist_uri> as src list" +
"\n-filelimit <n> Limit the total number of files to be <= n" +
"\n-sizelimit <n> Limit the total size to be <= n bytes" +
"\n-delete Delete the files existing in the dst but not in src" +
"\n-dryrun Display count of files and total size of files" +
"\n in src and then exit. Copy is not done at all." +
"\n desturl should not be speicified with out -update." +
"\n-mapredSslConf <f> Filename of SSL configuration for mapper task" +
"\n\nNOTE 1: if -overwrite or -update are set, each source URI is " +
"\n interpreted as an isomorphic update to an existing directory." +
"\nFor example:" +
"\nhadoop " + NAME + " -p -update \"hdfs://A:8020/user/foo/bar\" " +
"\"hdfs://B:8020/user/foo/baz\"\n" +
"\n would update all descendants of 'baz' also in 'bar'; it would " +
"\n *not* update /user/foo/baz/bar" +
"\n\nNOTE 2: The parameter <n> in -filelimit and -sizelimit can be " +
"\n specified with symbolic representation. For examples," +
"\n 1230k = 1230 * 1024 = 1259520" +
"\n 891g = 891 * 1024^3 = 956703965184" +
"\n";
private static final long BYTES_PER_MAP = 256 * 1024 * 1024;
private static final int MAX_MAPS_PER_NODE = 20;
private static final int SYNC_FILE_MAX = 10;
private static final int DEFAULT_FILE_RETRIES = 3;
static enum Counter { COPY, SKIP, FAIL, BYTESCOPIED, BYTESEXPECTED }
static enum Options {
DELETE("-delete", NAME + ".delete"),
FILE_LIMIT("-filelimit", NAME + ".limit.file"),
SIZE_LIMIT("-sizelimit", NAME + ".limit.size"),
IGNORE_READ_FAILURES("-i", NAME + ".ignore.read.failures"),
PRESERVE_STATUS("-p", NAME + ".preserve.status"),
OVERWRITE("-overwrite", NAME + ".overwrite.always"),
UPDATE("-update", NAME + ".overwrite.ifnewer"),
SKIPCRC("-skipcrccheck", NAME + ".skip.crc.check");
final String cmd, propertyname;
private Options(String cmd, String propertyname) {
this.cmd = cmd;
this.propertyname = propertyname;
}
private long parseLong(String[] args, int offset) {
if (offset == args.length) {
throw new IllegalArgumentException("<n> not specified in " + cmd);
}
long n = StringUtils.TraditionalBinaryPrefix.string2long(args[offset]);
if (n <= 0) {
throw new IllegalArgumentException("n = " + n + " <= 0 in " + cmd);
}
return n;
}
}
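  // Illustrative note (not part of the original source): parseLong() accepts the symbolic
  // sizes shown in the usage text above, e.g. "-sizelimit 1230k" parses to
  // 1230 * 1024 = 1259520, while "-filelimit 0" is rejected because n must be positive.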
static enum FileAttribute {
BLOCK_SIZE, REPLICATION, USER, GROUP, PERMISSION, TIMES;
final char symbol;
private FileAttribute() {
symbol = StringUtils.toLowerCase(toString()).charAt(0);
}
static EnumSet<FileAttribute> parse(String s) {
if (s == null || s.length() == 0) {
return EnumSet.allOf(FileAttribute.class);
}
EnumSet<FileAttribute> set = EnumSet.noneOf(FileAttribute.class);
FileAttribute[] attributes = values();
for(char c : s.toCharArray()) {
int i = 0;
for(; i < attributes.length && c != attributes[i].symbol; i++);
if (i < attributes.length) {
if (!set.contains(attributes[i])) {
set.add(attributes[i]);
} else {
throw new IllegalArgumentException("There are more than one '"
+ attributes[i].symbol + "' in " + s);
}
} else {
throw new IllegalArgumentException("'" + c + "' in " + s
+ " is undefined.");
}
}
return set;
}
}
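  // Illustrative note (not part of the original source): each attribute's symbol is the
  // lower-cased first letter of its name, so FileAttribute.parse("rbp") yields
  // {REPLICATION, BLOCK_SIZE, PERMISSION}, parse(null) or parse("") yields all six
  // attributes (the "-p alone" case in the usage text), and parse("rr") fails because
  // of the repeated 'r'.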
static final String TMP_DIR_LABEL = NAME + ".tmp.dir";
static final String DST_DIR_LABEL = NAME + ".dest.path";
static final String JOB_DIR_LABEL = NAME + ".job.dir";
static final String MAX_MAPS_LABEL = NAME + ".max.map.tasks";
static final String SRC_LIST_LABEL = NAME + ".src.list";
static final String SRC_COUNT_LABEL = NAME + ".src.count";
static final String TOTAL_SIZE_LABEL = NAME + ".total.size";
static final String DST_DIR_LIST_LABEL = NAME + ".dst.dir.list";
static final String BYTES_PER_MAP_LABEL = NAME + ".bytes.per.map";
static final String PRESERVE_STATUS_LABEL
= Options.PRESERVE_STATUS.propertyname + ".value";
static final String FILE_RETRIES_LABEL = NAME + ".file.retries";
private JobConf conf;
public void setConf(Configuration conf) {
if (conf instanceof JobConf) {
this.conf = (JobConf) conf;
} else {
this.conf = new JobConf(conf);
}
}
public Configuration getConf() {
return conf;
}
public DistCpV1(Configuration conf) {
setConf(conf);
}
/**
* An input/output pair of filenames.
*/
static class FilePair implements Writable {
FileStatus input = new FileStatus();
String output;
FilePair() { }
FilePair(FileStatus input, String output) {
this.input = input;
this.output = output;
}
public void readFields(DataInput in) throws IOException {
input.readFields(in);
output = Text.readString(in);
}
public void write(DataOutput out) throws IOException {
input.write(out);
Text.writeString(out, output);
}
public String toString() {
return input + " : " + output;
}
}
/**
* InputFormat of a distcp job responsible for generating splits of the src
* file list.
*/
static class CopyInputFormat implements InputFormat<Text, Text> {
/**
* Produce splits such that each is no greater than the quotient of the
* total size and the number of splits requested.
* @param job The handle to the JobConf object
* @param numSplits Number of splits requested
*/
public InputSplit[] getSplits(JobConf job, int numSplits)
throws IOException {
int cnfiles = job.getInt(SRC_COUNT_LABEL, -1);
long cbsize = job.getLong(TOTAL_SIZE_LABEL, -1);
String srcfilelist = job.get(SRC_LIST_LABEL, "");
if (cnfiles < 0 || cbsize < 0 || "".equals(srcfilelist)) {
throw new RuntimeException("Invalid metadata: #files(" + cnfiles +
") total_size(" + cbsize + ") listuri(" +
srcfilelist + ")");
}
Path src = new Path(srcfilelist);
FileSystem fs = src.getFileSystem(job);
FileStatus srcst = fs.getFileStatus(src);
ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
LongWritable key = new LongWritable();
FilePair value = new FilePair();
final long targetsize = cbsize / numSplits;
long pos = 0L;
long last = 0L;
long acc = 0L;
long cbrem = srcst.getLen();
try (SequenceFile.Reader sl =
new SequenceFile.Reader(job, Reader.file(src))) {
for (; sl.next(key, value); last = sl.getPosition()) {
// if adding this split would put this split past the target size,
// cut the last split and put this next file in the next split.
if (acc + key.get() > targetsize && acc != 0) {
long splitsize = last - pos;
splits.add(new FileSplit(src, pos, splitsize, (String[])null));
cbrem -= splitsize;
pos = last;
acc = 0L;
}
acc += key.get();
}
}
if (cbrem != 0) {
splits.add(new FileSplit(src, pos, cbrem, (String[])null));
}
return splits.toArray(new FileSplit[splits.size()]);
}
/**
* Returns a reader for this split of the src file list.
*/
public RecordReader<Text, Text> getRecordReader(InputSplit split,
JobConf job, Reporter reporter) throws IOException {
return new SequenceFileRecordReader<Text, Text>(job, (FileSplit)split);
}
}
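  // Illustrative sketch (not part of the original source) of the split policy above: with
  // 1 GB of data to copy and numSplits = 4, the target is 256 MB per split; records of
  // _distcp_src_files are accumulated until adding the next file would push the running
  // total past the target, at which point the current split is closed and the file starts
  // the next one, so a single file is never divided between splits.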
/**
* FSCopyFilesMapper: The mapper for copying files between FileSystems.
*/
static class CopyFilesMapper
implements Mapper<LongWritable, FilePair, WritableComparable<?>, Text> {
// config
private int sizeBuf = 128 * 1024;
private FileSystem destFileSys = null;
private boolean ignoreReadFailures;
private boolean preserve_status;
private EnumSet<FileAttribute> preseved;
private boolean overwrite;
private boolean update;
private Path destPath = null;
private byte[] buffer = null;
private JobConf job;
private boolean skipCRCCheck = false;
// stats
private int failcount = 0;
private int skipcount = 0;
private int copycount = 0;
private String getCountString() {
return "Copied: " + copycount + " Skipped: " + skipcount
+ " Failed: " + failcount;
}
private void updateStatus(Reporter reporter) {
reporter.setStatus(getCountString());
}
/**
* Return true if dst should be replaced by src and the update flag is set.
     * This delegates to sameFile(), which compares the src and dst lengths and,
     * unless the CRC check is skipped, their file checksums; it could be improved
     * further once modification times can be meaningful in this context.
* @throws IOException
*/
private boolean needsUpdate(FileStatus srcstatus,
FileSystem dstfs, Path dstpath) throws IOException {
return update && !sameFile(srcstatus.getPath().getFileSystem(job),
srcstatus, dstfs, dstpath, skipCRCCheck);
}
private FSDataOutputStream create(Path f, Reporter reporter,
FileStatus srcstat) throws IOException {
if (destFileSys.exists(f)) {
destFileSys.delete(f, false);
}
if (!preserve_status) {
return destFileSys.create(f, true, sizeBuf, reporter);
}
FsPermission permission = preseved.contains(FileAttribute.PERMISSION)?
srcstat.getPermission(): null;
short replication = preseved.contains(FileAttribute.REPLICATION)?
srcstat.getReplication(): destFileSys.getDefaultReplication(f);
long blockSize = preseved.contains(FileAttribute.BLOCK_SIZE)?
srcstat.getBlockSize(): destFileSys.getDefaultBlockSize(f);
return destFileSys.create(f, permission, true, sizeBuf, replication,
blockSize, reporter);
}
/**
* Validates copy by checking the sizes of files first and then
* checksums, if the filesystems support checksums.
* @param srcstat src path and metadata
* @param absdst dst path
* @return true if src & destination files are same
*/
private boolean validateCopy(FileStatus srcstat, Path absdst)
throws IOException {
if (destFileSys.exists(absdst)) {
if (sameFile(srcstat.getPath().getFileSystem(job), srcstat,
destFileSys, absdst, skipCRCCheck)) {
return true;
}
}
return false;
}
/**
* Increment number of files copied and bytes copied and then report status
*/
void updateCopyStatus(FileStatus srcstat, Reporter reporter) {
copycount++;
reporter.incrCounter(Counter.BYTESCOPIED, srcstat.getLen());
reporter.incrCounter(Counter.COPY, 1);
updateStatus(reporter);
}
/**
* Skip copying this file if already exists at the destination.
* Updates counters and copy status if skipping this file.
* @return true if copy of this file can be skipped
*/
private boolean skipCopyFile(FileStatus srcstat, Path absdst,
OutputCollector<WritableComparable<?>, Text> outc,
Reporter reporter) throws IOException {
if (destFileSys.exists(absdst) && !overwrite
&& !needsUpdate(srcstat, destFileSys, absdst)) {
outc.collect(null, new Text("SKIP: " + srcstat.getPath()));
++skipcount;
reporter.incrCounter(Counter.SKIP, 1);
updateStatus(reporter);
return true;
}
return false;
}
/**
* Copies single file to the path specified by tmpfile.
* @param srcstat src path and metadata
* @param tmpfile temporary file to which copy is to be done
* @param absdst actual destination path to which copy is to be done
* @param reporter
* @return Number of bytes copied
*/
private long doCopyFile(FileStatus srcstat, Path tmpfile, Path absdst,
Reporter reporter) throws IOException {
long bytesCopied = 0L;
Path srcPath = srcstat.getPath();
// open src file
try (FSDataInputStream in = srcPath.getFileSystem(job).open(srcPath)) {
reporter.incrCounter(Counter.BYTESEXPECTED, srcstat.getLen());
// open tmp file
try (FSDataOutputStream out = create(tmpfile, reporter, srcstat)) {
LOG.info("Copying file " + srcPath + " of size " +
srcstat.getLen() + " bytes...");
// copy file
for(int bytesRead; (bytesRead = in.read(buffer)) >= 0; ) {
out.write(buffer, 0, bytesRead);
bytesCopied += bytesRead;
reporter.setStatus(
String.format("%.2f ", bytesCopied*100.0/srcstat.getLen())
+ absdst + " [ " +
TraditionalBinaryPrefix.long2String(bytesCopied, "", 1) + " / "
+ TraditionalBinaryPrefix.long2String(srcstat.getLen(), "", 1)
+ " ]");
}
}
}
return bytesCopied;
}
/**
* Copy a file to a destination.
* @param srcstat src path and metadata
* @param relativedst relative dst path
* @param outc Log of skipped files
* @param reporter
     * @throws IOException if copy fails (even if the validation of copy fails)
*/
private void copy(FileStatus srcstat, Path relativedst,
OutputCollector<WritableComparable<?>, Text> outc, Reporter reporter)
throws IOException {
Path absdst = new Path(destPath, relativedst);
int totfiles = job.getInt(SRC_COUNT_LABEL, -1);
assert totfiles >= 0 : "Invalid file count " + totfiles;
if (totfiles == 1) {
// Copying a single file; use dst path provided by user as
// destination file rather than destination directory
Path dstparent = absdst.getParent();
if (!(destFileSys.exists(dstparent) &&
destFileSys.getFileStatus(dstparent).isDirectory())) {
absdst = dstparent;
}
}
// if a directory, ensure created even if empty
if (srcstat.isDirectory()) {
if (destFileSys.exists(absdst)) {
if (destFileSys.getFileStatus(absdst).isFile()) {
throw new IOException("Failed to mkdirs: " + absdst+" is a file.");
}
}
else if (!destFileSys.mkdirs(absdst)) {
throw new IOException("Failed to mkdirs " + absdst);
}
// TODO: when modification times can be set, directories should be
// emitted to reducers so they might be preserved. Also, mkdirs does
// not currently return an error when the directory already exists;
// if this changes, all directory work might as well be done in reduce
return;
}
// Can we skip copying this file ?
if (skipCopyFile(srcstat, absdst, outc, reporter)) {
return;
}
Path tmpfile = new Path(job.get(TMP_DIR_LABEL), relativedst);
// do the actual copy to tmpfile
long bytesCopied = doCopyFile(srcstat, tmpfile, absdst, reporter);
if (bytesCopied != srcstat.getLen()) {
throw new IOException("File size not matched: copied "
+ bytesString(bytesCopied) + " to tmpfile (=" + tmpfile
+ ") but expected " + bytesString(srcstat.getLen())
+ " from " + srcstat.getPath());
}
else {
if (destFileSys.exists(absdst) &&
destFileSys.getFileStatus(absdst).isDirectory()) {
throw new IOException(absdst + " is a directory");
}
if (!destFileSys.mkdirs(absdst.getParent())) {
throw new IOException("Failed to create parent dir: " + absdst.getParent());
}
rename(tmpfile, absdst);
if (!validateCopy(srcstat, absdst)) {
destFileSys.delete(absdst, false);
throw new IOException("Validation of copy of file "
+ srcstat.getPath() + " failed.");
}
updateDestStatus(srcstat, destFileSys.getFileStatus(absdst));
}
// report at least once for each file
updateCopyStatus(srcstat, reporter);
}
/** rename tmp to dst, delete dst if already exists */
private void rename(Path tmp, Path dst) throws IOException {
try {
if (destFileSys.exists(dst)) {
destFileSys.delete(dst, true);
}
if (!destFileSys.rename(tmp, dst)) {
throw new IOException();
}
}
catch(IOException cause) {
throw (IOException)new IOException("Fail to rename tmp file (=" + tmp
+ ") to destination file (=" + dst + ")").initCause(cause);
}
}
private void updateDestStatus(FileStatus src, FileStatus dst
) throws IOException {
if (preserve_status) {
DistCpV1.updateDestStatus(src, dst, preseved, destFileSys);
}
}
static String bytesString(long b) {
return b + " bytes (" +
TraditionalBinaryPrefix.long2String(b, "", 1) + ")";
}
/**
* Copies a file and validates the copy by checking the checksums.
* If validation fails, retries (max number of tries is distcp.file.retries)
* to copy the file.
*/
void copyWithRetries(FileStatus srcstat, Path relativedst,
OutputCollector<WritableComparable<?>, Text> out,
Reporter reporter) throws IOException {
// max tries to copy when validation of copy fails
final int maxRetries = job.getInt(FILE_RETRIES_LABEL, DEFAULT_FILE_RETRIES);
// save update flag for later copies within the same map task
final boolean saveUpdate = update;
int retryCnt = 1;
for (; retryCnt <= maxRetries; retryCnt++) {
try {
//copy the file and validate copy
copy(srcstat, relativedst, out, reporter);
break;// copy successful
} catch (IOException e) {
LOG.warn("Copy of " + srcstat.getPath() + " failed.", e);
if (retryCnt < maxRetries) {// copy failed and need to retry
LOG.info("Retrying copy of file " + srcstat.getPath());
update = true; // set update flag for retries
}
else {// no more retries... Give up
update = saveUpdate;
throw new IOException("Copy of file failed even with " + retryCnt
+ " tries.", e);
}
}
}
}
/** Mapper configuration.
* Extracts source and destination file system, as well as
* top-level paths on source and destination directories.
* Gets the named file systems, to be used later in map.
*/
public void configure(JobConf job)
{
destPath = new Path(job.get(DST_DIR_LABEL, "/"));
try {
destFileSys = destPath.getFileSystem(job);
} catch (IOException ex) {
throw new RuntimeException("Unable to get the named file system.", ex);
}
sizeBuf = job.getInt("copy.buf.size", 128 * 1024);
buffer = new byte[sizeBuf];
ignoreReadFailures = job.getBoolean(Options.IGNORE_READ_FAILURES.propertyname, false);
preserve_status = job.getBoolean(Options.PRESERVE_STATUS.propertyname, false);
if (preserve_status) {
preseved = FileAttribute.parse(job.get(PRESERVE_STATUS_LABEL));
}
update = job.getBoolean(Options.UPDATE.propertyname, false);
overwrite = !update && job.getBoolean(Options.OVERWRITE.propertyname, false);
skipCRCCheck = job.getBoolean(Options.SKIPCRC.propertyname, false);
this.job = job;
}
/** Map method. Copies one file from source file system to destination.
* @param key src len
* @param value FilePair (FileStatus src, Path dst)
* @param out Log of failed copies
* @param reporter
*/
public void map(LongWritable key,
FilePair value,
OutputCollector<WritableComparable<?>, Text> out,
Reporter reporter) throws IOException {
final FileStatus srcstat = value.input;
final Path relativedst = new Path(value.output);
try {
copyWithRetries(srcstat, relativedst, out, reporter);
} catch (IOException e) {
++failcount;
reporter.incrCounter(Counter.FAIL, 1);
updateStatus(reporter);
final String sfailure = "FAIL " + relativedst + " : " +
StringUtils.stringifyException(e);
out.collect(null, new Text(sfailure));
LOG.info(sfailure);
if (e instanceof FileNotFoundException) {
final String s = "Possible Cause for failure: Either the filesystem "
+ srcstat.getPath().getFileSystem(job)
+ " is not accessible or the file is deleted";
LOG.error(s);
out.collect(null, new Text(s));
}
try {
for (int i = 0; i < 3; ++i) {
try {
final Path tmp = new Path(job.get(TMP_DIR_LABEL), relativedst);
if (destFileSys.delete(tmp, true))
break;
} catch (Throwable ex) {
// ignore, we are just cleaning up
LOG.debug("Ignoring cleanup exception", ex);
}
// update status, so we don't get timed out
updateStatus(reporter);
Thread.sleep(3 * 1000);
}
} catch (InterruptedException inte) {
throw (IOException)new IOException().initCause(inte);
}
} finally {
updateStatus(reporter);
}
}
public void close() throws IOException {
if (0 == failcount || ignoreReadFailures) {
return;
}
throw new IOException(getCountString());
}
}
private static List<Path> fetchFileList(Configuration conf, Path srcList)
throws IOException {
List<Path> result = new ArrayList<Path>();
FileSystem fs = srcList.getFileSystem(conf);
try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.open(srcList),
Charset.forName("UTF-8")))) {
String line = input.readLine();
while (line != null) {
result.add(new Path(line));
line = input.readLine();
}
}
return result;
}
@Deprecated
public static void copy(Configuration conf, String srcPath,
String destPath, Path logPath,
boolean srcAsList, boolean ignoreReadFailures)
throws IOException {
final Path src = new Path(srcPath);
List<Path> tmp = new ArrayList<Path>();
if (srcAsList) {
tmp.addAll(fetchFileList(conf, src));
} else {
tmp.add(src);
}
EnumSet<Options> flags = ignoreReadFailures
? EnumSet.of(Options.IGNORE_READ_FAILURES)
: EnumSet.noneOf(Options.class);
final Path dst = new Path(destPath);
copy(conf, new Arguments(tmp, null, dst, logPath, flags, null,
Long.MAX_VALUE, Long.MAX_VALUE, null, false));
}
/** Sanity check for srcPath */
private static void checkSrcPath(JobConf jobConf, List<Path> srcPaths)
throws IOException {
List<IOException> rslt = new ArrayList<IOException>();
List<Path> unglobbed = new LinkedList<Path>();
Path[] ps = new Path[srcPaths.size()];
ps = srcPaths.toArray(ps);
TokenCache.obtainTokensForNamenodes(jobConf.getCredentials(), ps, jobConf);
for (Path p : srcPaths) {
FileSystem fs = p.getFileSystem(jobConf);
FileStatus[] inputs = fs.globStatus(p);
if(inputs != null && inputs.length > 0) {
for (FileStatus onePath: inputs) {
unglobbed.add(onePath.getPath());
}
} else {
rslt.add(new IOException("Input source " + p + " does not exist."));
}
}
if (!rslt.isEmpty()) {
throw new InvalidInputException(rslt);
}
srcPaths.clear();
srcPaths.addAll(unglobbed);
}
/**
* Driver to copy srcPath to destPath depending on required protocol.
* @param conf configuration
* @param args arguments
*/
static void copy(final Configuration conf, final Arguments args
) throws IOException {
LOG.info("srcPaths=" + args.srcs);
if (!args.dryrun || args.flags.contains(Options.UPDATE)) {
LOG.info("destPath=" + args.dst);
}
JobConf job = createJobConf(conf);
checkSrcPath(job, args.srcs);
if (args.preservedAttributes != null) {
job.set(PRESERVE_STATUS_LABEL, args.preservedAttributes);
}
if (args.mapredSslConf != null) {
job.set("dfs.https.client.keystore.resource", args.mapredSslConf);
}
//Initialize the mapper
try {
if (setup(conf, job, args)) {
JobClient.runJob(job);
}
if(!args.dryrun) {
finalize(conf, job, args.dst, args.preservedAttributes);
}
} finally {
if (!args.dryrun) {
//delete tmp
fullyDelete(job.get(TMP_DIR_LABEL), job);
}
//delete jobDirectory
fullyDelete(job.get(JOB_DIR_LABEL), job);
}
}
private static void updateDestStatus(FileStatus src, FileStatus dst,
EnumSet<FileAttribute> preseved, FileSystem destFileSys
) throws IOException {
String owner = null;
String group = null;
if (preseved.contains(FileAttribute.USER)
&& !src.getOwner().equals(dst.getOwner())) {
owner = src.getOwner();
}
if (preseved.contains(FileAttribute.GROUP)
&& !src.getGroup().equals(dst.getGroup())) {
group = src.getGroup();
}
if (owner != null || group != null) {
destFileSys.setOwner(dst.getPath(), owner, group);
}
if (preseved.contains(FileAttribute.PERMISSION)
&& !src.getPermission().equals(dst.getPermission())) {
destFileSys.setPermission(dst.getPath(), src.getPermission());
}
if (preseved.contains(FileAttribute.TIMES)) {
destFileSys.setTimes(dst.getPath(), src.getModificationTime(), src.getAccessTime());
}
}
static private void finalize(Configuration conf, JobConf jobconf,
final Path destPath, String presevedAttributes) throws IOException {
if (presevedAttributes == null) {
return;
}
EnumSet<FileAttribute> preseved = FileAttribute.parse(presevedAttributes);
if (!preseved.contains(FileAttribute.USER)
&& !preseved.contains(FileAttribute.GROUP)
&& !preseved.contains(FileAttribute.PERMISSION)) {
return;
}
FileSystem dstfs = destPath.getFileSystem(conf);
Path dstdirlist = new Path(jobconf.get(DST_DIR_LIST_LABEL));
try (SequenceFile.Reader in =
new SequenceFile.Reader(jobconf, Reader.file(dstdirlist))) {
Text dsttext = new Text();
FilePair pair = new FilePair();
for(; in.next(dsttext, pair); ) {
Path absdst = new Path(destPath, pair.output);
updateDestStatus(pair.input, dstfs.getFileStatus(absdst),
preseved, dstfs);
}
}
}
static class Arguments {
final List<Path> srcs;
final Path basedir;
final Path dst;
final Path log;
final EnumSet<Options> flags;
final String preservedAttributes;
final long filelimit;
final long sizelimit;
final String mapredSslConf;
final boolean dryrun;
/**
* Arguments for distcp
* @param srcs List of source paths
* @param basedir Base directory for copy
* @param dst Destination path
* @param log Log output directory
* @param flags Command-line flags
* @param preservedAttributes Preserved attributes
* @param filelimit File limit
* @param sizelimit Size limit
* @param mapredSslConf ssl configuration
* @param dryrun
*/
Arguments(List<Path> srcs, Path basedir, Path dst, Path log,
EnumSet<Options> flags, String preservedAttributes,
long filelimit, long sizelimit, String mapredSslConf,
boolean dryrun) {
this.srcs = srcs;
this.basedir = basedir;
this.dst = dst;
this.log = log;
this.flags = flags;
this.preservedAttributes = preservedAttributes;
this.filelimit = filelimit;
this.sizelimit = sizelimit;
this.mapredSslConf = mapredSslConf;
this.dryrun = dryrun;
if (LOG.isTraceEnabled()) {
LOG.trace("this = " + this);
}
}
static Arguments valueOf(String[] args, Configuration conf
) throws IOException {
List<Path> srcs = new ArrayList<Path>();
Path dst = null;
Path log = null;
Path basedir = null;
EnumSet<Options> flags = EnumSet.noneOf(Options.class);
String presevedAttributes = null;
String mapredSslConf = null;
long filelimit = Long.MAX_VALUE;
long sizelimit = Long.MAX_VALUE;
boolean dryrun = false;
for (int idx = 0; idx < args.length; idx++) {
Options[] opt = Options.values();
int i = 0;
for(; i < opt.length && !args[idx].startsWith(opt[i].cmd); i++);
if (i < opt.length) {
flags.add(opt[i]);
if (opt[i] == Options.PRESERVE_STATUS) {
presevedAttributes = args[idx].substring(2);
FileAttribute.parse(presevedAttributes); //validation
}
else if (opt[i] == Options.FILE_LIMIT) {
filelimit = Options.FILE_LIMIT.parseLong(args, ++idx);
}
else if (opt[i] == Options.SIZE_LIMIT) {
sizelimit = Options.SIZE_LIMIT.parseLong(args, ++idx);
}
} else if ("-f".equals(args[idx])) {
if (++idx == args.length) {
throw new IllegalArgumentException("urilist_uri not specified in -f");
}
srcs.addAll(fetchFileList(conf, new Path(args[idx])));
} else if ("-log".equals(args[idx])) {
if (++idx == args.length) {
throw new IllegalArgumentException("logdir not specified in -log");
}
log = new Path(args[idx]);
} else if ("-basedir".equals(args[idx])) {
if (++idx == args.length) {
throw new IllegalArgumentException("basedir not specified in -basedir");
}
basedir = new Path(args[idx]);
} else if ("-mapredSslConf".equals(args[idx])) {
if (++idx == args.length) {
throw new IllegalArgumentException("ssl conf file not specified in -mapredSslConf");
}
mapredSslConf = args[idx];
} else if ("-dryrun".equals(args[idx])) {
dryrun = true;
dst = new Path("/tmp/distcp_dummy_dest");//dummy destination
} else if ("-m".equals(args[idx])) {
if (++idx == args.length) {
throw new IllegalArgumentException("num_maps not specified in -m");
}
try {
conf.setInt(MAX_MAPS_LABEL, Integer.parseInt(args[idx]));
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Invalid argument to -m: " +
args[idx]);
}
} else if ('-' == args[idx].codePointAt(0)) {
throw new IllegalArgumentException("Invalid switch " + args[idx]);
} else if (idx == args.length -1 &&
(!dryrun || flags.contains(Options.UPDATE))) {
dst = new Path(args[idx]);
} else {
srcs.add(new Path(args[idx]));
}
}
// mandatory command-line parameters
if (srcs.isEmpty() || dst == null) {
throw new IllegalArgumentException("Missing "
+ (dst == null ? "dst path" : "src"));
}
// incompatible command-line flags
final boolean isOverwrite = flags.contains(Options.OVERWRITE);
final boolean isUpdate = flags.contains(Options.UPDATE);
final boolean isDelete = flags.contains(Options.DELETE);
final boolean skipCRC = flags.contains(Options.SKIPCRC);
if (isOverwrite && isUpdate) {
throw new IllegalArgumentException("Conflicting overwrite policies");
}
if (!isUpdate && skipCRC) {
throw new IllegalArgumentException(
Options.SKIPCRC.cmd + " is relevant only with the " +
Options.UPDATE.cmd + " option");
}
if (isDelete && !isOverwrite && !isUpdate) {
throw new IllegalArgumentException(Options.DELETE.cmd
+ " must be specified with " + Options.OVERWRITE + " or "
+ Options.UPDATE + ".");
}
return new Arguments(srcs, basedir, dst, log, flags, presevedAttributes,
filelimit, sizelimit, mapredSslConf, dryrun);
}
/** {@inheritDoc} */
public String toString() {
return getClass().getName() + "{"
+ "\n srcs = " + srcs
+ "\n dst = " + dst
+ "\n log = " + log
+ "\n flags = " + flags
+ "\n preservedAttributes = " + preservedAttributes
+ "\n filelimit = " + filelimit
+ "\n sizelimit = " + sizelimit
+ "\n mapredSslConf = " + mapredSslConf
+ "\n}";
}
}
/**
* This is the main driver for recursively copying directories
* across file systems. It takes at least two cmdline parameters. A source
* URL and a destination URL. It then essentially does an "ls -lR" on the
* source URL, and writes the output in a round-robin manner to all the map
* input files. The mapper actually copies the files allotted to it. The
* reduce is empty.
*/
public int run(String[] args) {
try {
copy(conf, Arguments.valueOf(args, conf));
return 0;
} catch (IllegalArgumentException e) {
System.err.println(StringUtils.stringifyException(e) + "\n" + usage);
ToolRunner.printGenericCommandUsage(System.err);
return -1;
} catch (DuplicationException e) {
System.err.println(StringUtils.stringifyException(e));
return DuplicationException.ERROR_CODE;
} catch (RemoteException e) {
final IOException unwrapped = e.unwrapRemoteException(
FileNotFoundException.class,
AccessControlException.class,
QuotaExceededException.class);
System.err.println(StringUtils.stringifyException(unwrapped));
return -3;
} catch (Exception e) {
System.err.println("With failures, global counters are inaccurate; " +
"consider running with -i");
System.err.println("Copy failed: " + StringUtils.stringifyException(e));
return -999;
}
}
public static void main(String[] args) throws Exception {
JobConf job = new JobConf(DistCpV1.class);
DistCpV1 distcp = new DistCpV1(job);
int res = ToolRunner.run(distcp, args);
System.exit(res);
}
/**
* Make a path relative with respect to a root path.
   * absPath is assumed to descend from root; if it does not, the returned
   * path is null.
*/
static String makeRelative(Path root, Path absPath) {
if (!absPath.isAbsolute()) {
throw new IllegalArgumentException("!absPath.isAbsolute(), absPath="
+ absPath);
}
String p = absPath.toUri().getPath();
StringTokenizer pathTokens = new StringTokenizer(p, "/");
for(StringTokenizer rootTokens = new StringTokenizer(
root.toUri().getPath(), "/"); rootTokens.hasMoreTokens(); ) {
if (!rootTokens.nextToken().equals(pathTokens.nextToken())) {
return null;
}
}
StringBuilder sb = new StringBuilder();
for(; pathTokens.hasMoreTokens(); ) {
sb.append(pathTokens.nextToken());
if (pathTokens.hasMoreTokens()) { sb.append(Path.SEPARATOR); }
}
return sb.length() == 0? ".": sb.toString();
}
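  // Illustrative note (not part of the original source):
  //   makeRelative(new Path("/user/foo"), new Path("/user/foo/bar/baz")) -> "bar/baz"
  //   makeRelative(new Path("/user/foo"), new Path("/user/foo"))         -> "."
  //   makeRelative(new Path("/user/foo"), new Path("/other/path"))       -> null
  // and a non-absolute absPath triggers an IllegalArgumentException.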
/**
* Calculate how many maps to run.
* Number of maps is bounded by a minimum of the cumulative size of the
* copy / (distcp.bytes.per.map, default BYTES_PER_MAP or -m on the
* command line) and at most (distcp.max.map.tasks, default
* MAX_MAPS_PER_NODE * nodes in the cluster).
* @param totalBytes Count of total bytes for job
* @param job The job to configure
* @return Count of maps to run.
*/
private static int setMapCount(long totalBytes, JobConf job)
throws IOException {
int numMaps =
(int)(totalBytes / job.getLong(BYTES_PER_MAP_LABEL, BYTES_PER_MAP));
numMaps = Math.min(numMaps,
job.getInt(MAX_MAPS_LABEL, MAX_MAPS_PER_NODE *
new JobClient(job).getClusterStatus().getTaskTrackers()));
numMaps = Math.max(numMaps, 1);
job.setNumMapTasks(numMaps);
return numMaps;
}
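  // Illustrative arithmetic (not part of the original source): with the default
  // distcp.bytes.per.map of 256 MB, a 10 GB copy yields 10240 / 256 = 40 maps, which is
  // then capped at MAX_MAPS_PER_NODE (20) times the number of task trackers unless -m
  // overrides distcp.max.map.tasks, and is never allowed to fall below 1.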
/** Fully delete dir */
static void fullyDelete(String dir, Configuration conf) throws IOException {
if (dir != null) {
Path tmp = new Path(dir);
boolean success = tmp.getFileSystem(conf).delete(tmp, true);
if (!success) {
LOG.warn("Could not fully delete " + tmp);
}
}
}
//Job configuration
private static JobConf createJobConf(Configuration conf) {
JobConf jobconf = new JobConf(conf, DistCpV1.class);
jobconf.setJobName(conf.get("mapred.job.name", NAME));
// turn off speculative execution, because DFS doesn't handle
// multiple writers to the same file.
jobconf.setMapSpeculativeExecution(false);
jobconf.setInputFormat(CopyInputFormat.class);
jobconf.setOutputKeyClass(Text.class);
jobconf.setOutputValueClass(Text.class);
jobconf.setMapperClass(CopyFilesMapper.class);
jobconf.setNumReduceTasks(0);
return jobconf;
}
private static final Random RANDOM = new Random();
public static String getRandomId() {
return Integer.toString(RANDOM.nextInt(Integer.MAX_VALUE), 36);
}
/**
* Increase the replication factor of _distcp_src_files to
   * sqrt(min(maxMapsOnCluster, numMaps)). This reduces the chance of distcp
   * failing because no replica of _distcp_src_files is available for reading
   * by some of the maps.
*/
private static void setReplication(Configuration conf, JobConf jobConf,
Path srcfilelist, int numMaps) throws IOException {
int numMaxMaps = new JobClient(jobConf).getClusterStatus().getMaxMapTasks();
short replication = (short) Math.ceil(
Math.sqrt(Math.min(numMaxMaps, numMaps)));
FileSystem fs = srcfilelist.getFileSystem(conf);
FileStatus srcStatus = fs.getFileStatus(srcfilelist);
if (srcStatus.getReplication() < replication) {
if (!fs.setReplication(srcfilelist, replication)) {
throw new IOException("Unable to increase the replication of file " +
srcfilelist);
}
}
}
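  // Illustrative arithmetic (not part of the original source): with 200 map slots in the
  // cluster and 40 maps planned, the target replication is
  // ceil(sqrt(min(200, 40))) = ceil(6.32...) = 7, and it is applied only when the current
  // replication of _distcp_src_files is lower than that.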
/**
* Does the dir already exist at destination ?
* @return true if the dir already exists at destination
*/
private static boolean dirExists(Configuration conf, Path dst)
throws IOException {
FileSystem destFileSys = dst.getFileSystem(conf);
FileStatus status = null;
try {
status = destFileSys.getFileStatus(dst);
}catch (FileNotFoundException e) {
return false;
}
if (status.isFile()) {
throw new FileAlreadyExistsException("Not a dir: " + dst+" is a file.");
}
return true;
}
/**
* Initialize DFSCopyFileMapper specific job-configuration.
* @param conf : The dfs/mapred configuration.
* @param jobConf : The handle to the jobConf object to be initialized.
* @param args Arguments
* @return true if it is necessary to launch a job.
*/
static boolean setup(Configuration conf, JobConf jobConf,
final Arguments args)
throws IOException {
jobConf.set(DST_DIR_LABEL, args.dst.toUri().toString());
//set boolean values
final boolean update = args.flags.contains(Options.UPDATE);
final boolean skipCRCCheck = args.flags.contains(Options.SKIPCRC);
final boolean overwrite = !update && args.flags.contains(Options.OVERWRITE)
&& !args.dryrun;
jobConf.setBoolean(Options.UPDATE.propertyname, update);
jobConf.setBoolean(Options.SKIPCRC.propertyname, skipCRCCheck);
jobConf.setBoolean(Options.OVERWRITE.propertyname, overwrite);
jobConf.setBoolean(Options.IGNORE_READ_FAILURES.propertyname,
args.flags.contains(Options.IGNORE_READ_FAILURES));
jobConf.setBoolean(Options.PRESERVE_STATUS.propertyname,
args.flags.contains(Options.PRESERVE_STATUS));
final String randomId = getRandomId();
JobClient jClient = new JobClient(jobConf);
Path stagingArea;
try {
stagingArea =
JobSubmissionFiles.getStagingDir(jClient.getClusterHandle(), conf);
} catch (InterruptedException ie) {
throw new IOException(ie);
}
Path jobDirectory = new Path(stagingArea + NAME + "_" + randomId);
FsPermission mapredSysPerms =
new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
FileSystem.mkdirs(jClient.getFs(), jobDirectory, mapredSysPerms);
jobConf.set(JOB_DIR_LABEL, jobDirectory.toString());
long maxBytesPerMap = conf.getLong(BYTES_PER_MAP_LABEL, BYTES_PER_MAP);
FileSystem dstfs = args.dst.getFileSystem(conf);
// get tokens for all the required FileSystems..
TokenCache.obtainTokensForNamenodes(jobConf.getCredentials(),
new Path[] {args.dst}, conf);
boolean dstExists = dstfs.exists(args.dst);
boolean dstIsDir = false;
if (dstExists) {
dstIsDir = dstfs.getFileStatus(args.dst).isDirectory();
}
// default logPath
Path logPath = args.log;
if (logPath == null) {
String filename = "_distcp_logs_" + randomId;
if (!dstExists || !dstIsDir) {
Path parent = args.dst.getParent();
if (null == parent) {
// If dst is '/' on S3, it might not exist yet, but dst.getParent()
// will return null. In this case, use '/' as its own parent to prevent
// NPE errors below.
parent = args.dst;
}
if (!dstfs.exists(parent)) {
dstfs.mkdirs(parent);
}
logPath = new Path(parent, filename);
} else {
logPath = new Path(args.dst, filename);
}
}
FileOutputFormat.setOutputPath(jobConf, logPath);
// create src list, dst list
FileSystem jobfs = jobDirectory.getFileSystem(jobConf);
Path srcfilelist = new Path(jobDirectory, "_distcp_src_files");
Path dstfilelist = new Path(jobDirectory, "_distcp_dst_files");
Path dstdirlist = new Path(jobDirectory, "_distcp_dst_dirs");
jobConf.set(SRC_LIST_LABEL, srcfilelist.toString());
jobConf.set(DST_DIR_LIST_LABEL, dstdirlist.toString());
int srcCount = 0, cnsyncf = 0, dirsyn = 0;
long fileCount = 0L, dirCount = 0L, byteCount = 0L, cbsyncs = 0L,
skipFileCount = 0L, skipByteCount = 0L;
try (
SequenceFile.Writer src_writer = SequenceFile.createWriter(jobConf,
Writer.file(srcfilelist), Writer.keyClass(LongWritable.class),
Writer.valueClass(FilePair.class), Writer.compression(
SequenceFile.CompressionType.NONE));
SequenceFile.Writer dst_writer = SequenceFile.createWriter(jobConf,
Writer.file(dstfilelist), Writer.keyClass(Text.class),
Writer.valueClass(Text.class), Writer.compression(
SequenceFile.CompressionType.NONE));
SequenceFile.Writer dir_writer = SequenceFile.createWriter(jobConf,
Writer.file(dstdirlist), Writer.keyClass(Text.class),
Writer.valueClass(FilePair.class), Writer.compression(
SequenceFile.CompressionType.NONE));
) {
// handle the case where the destination directory doesn't exist
// and we've only a single src directory OR we're updating/overwriting
// the contents of the destination directory.
final boolean special =
(args.srcs.size() == 1 && !dstExists) || update || overwrite;
Path basedir = null;
HashSet<Path> parentDirsToCopy = new HashSet<Path>();
if (args.basedir != null) {
FileSystem basefs = args.basedir.getFileSystem(conf);
basedir = args.basedir.makeQualified(
basefs.getUri(), basefs.getWorkingDirectory());
if (!basefs.isDirectory(basedir)) {
throw new IOException("Basedir " + basedir + " is not a directory.");
}
}
for(Iterator<Path> srcItr = args.srcs.iterator(); srcItr.hasNext(); ) {
final Path src = srcItr.next();
FileSystem srcfs = src.getFileSystem(conf);
FileStatus srcfilestat = srcfs.getFileStatus(src);
Path root = special && srcfilestat.isDirectory()? src: src.getParent();
if (dstExists && !dstIsDir &&
(args.srcs.size() > 1 || srcfilestat.isDirectory())) {
// destination should not be a file
throw new IOException("Destination " + args.dst + " should be a dir" +
" if multiple source paths are there OR if" +
" the source path is a dir");
}
if (basedir != null) {
root = basedir;
Path parent = src.getParent().makeQualified(
srcfs.getUri(), srcfs.getWorkingDirectory());
while (parent != null && !parent.equals(basedir)) {
if (!parentDirsToCopy.contains(parent)){
parentDirsToCopy.add(parent);
String dst = makeRelative(root, parent);
FileStatus pst = srcfs.getFileStatus(parent);
src_writer.append(new LongWritable(0), new FilePair(pst, dst));
dst_writer.append(new Text(dst), new Text(parent.toString()));
dir_writer.append(new Text(dst), new FilePair(pst, dst));
if (++dirsyn > SYNC_FILE_MAX) {
dirsyn = 0;
dir_writer.sync();
}
}
parent = parent.getParent();
}
if (parent == null) {
throw new IOException("Basedir " + basedir +
" is not a prefix of source path " + src);
}
}
if (srcfilestat.isDirectory()) {
++srcCount;
final String dst = makeRelative(root,src);
if (!update || !dirExists(conf, new Path(args.dst, dst))) {
++dirCount;
src_writer.append(new LongWritable(0),
new FilePair(srcfilestat, dst));
}
dst_writer.append(new Text(dst), new Text(src.toString()));
}
Stack<FileStatus> pathstack = new Stack<FileStatus>();
for(pathstack.push(srcfilestat); !pathstack.empty(); ) {
FileStatus cur = pathstack.pop();
FileStatus[] children = srcfs.listStatus(cur.getPath());
for(int i = 0; i < children.length; i++) {
boolean skipPath = false;
final FileStatus child = children[i];
final String dst = makeRelative(root, child.getPath());
++srcCount;
if (child.isDirectory()) {
pathstack.push(child);
if (!update || !dirExists(conf, new Path(args.dst, dst))) {
++dirCount;
}
else {
skipPath = true; // skip creating dir at destination
}
}
else {
Path destPath = new Path(args.dst, dst);
if (cur.isFile() && (args.srcs.size() == 1)) {
// Copying a single file; use dst path provided by user as
// destination file rather than destination directory
Path dstparent = destPath.getParent();
FileSystem destFileSys = destPath.getFileSystem(jobConf);
if (!(destFileSys.exists(dstparent) &&
destFileSys.getFileStatus(dstparent).isDirectory())) {
destPath = dstparent;
}
}
//skip path if the src and the dst files are the same.
skipPath = update &&
sameFile(srcfs, child, dstfs, destPath, skipCRCCheck);
//skip path if it exceed file limit or size limit
skipPath |= fileCount == args.filelimit
|| byteCount + child.getLen() > args.sizelimit;
if (!skipPath) {
++fileCount;
byteCount += child.getLen();
if (LOG.isTraceEnabled()) {
LOG.trace("adding file " + child.getPath());
}
++cnsyncf;
cbsyncs += child.getLen();
if (cnsyncf > SYNC_FILE_MAX || cbsyncs > maxBytesPerMap) {
src_writer.sync();
dst_writer.sync();
cnsyncf = 0;
cbsyncs = 0L;
}
}
else {
++skipFileCount;
skipByteCount += child.getLen();
if (LOG.isTraceEnabled()) {
LOG.trace("skipping file " + child.getPath());
}
}
}
if (!skipPath) {
src_writer.append(new LongWritable(child.isDirectory()? 0: child.getLen()),
new FilePair(child, dst));
}
dst_writer.append(new Text(dst),
new Text(child.getPath().toString()));
}
if (cur.isDirectory()) {
String dst = makeRelative(root, cur.getPath());
dir_writer.append(new Text(dst), new FilePair(cur, dst));
if (++dirsyn > SYNC_FILE_MAX) {
dirsyn = 0;
dir_writer.sync();
}
}
}
}
}
LOG.info("sourcePathsCount(files+directories)=" + srcCount);
LOG.info("filesToCopyCount=" + fileCount);
LOG.info("bytesToCopyCount=" +
TraditionalBinaryPrefix.long2String(byteCount, "", 1));
if (update) {
LOG.info("filesToSkipCopyCount=" + skipFileCount);
LOG.info("bytesToSkipCopyCount=" +
TraditionalBinaryPrefix.long2String(skipByteCount, "", 1));
}
if (args.dryrun) {
return false;
}
int mapCount = setMapCount(byteCount, jobConf);
// Increase the replication of _distcp_src_files, if needed
setReplication(conf, jobConf, srcfilelist, mapCount);
FileStatus dststatus = null;
try {
dststatus = dstfs.getFileStatus(args.dst);
} catch(FileNotFoundException fnfe) {
LOG.info(args.dst + " does not exist.");
}
// create dest path dir if copying > 1 file
if (dststatus == null) {
if (srcCount > 1 && !dstfs.mkdirs(args.dst)) {
throw new IOException("Failed to create" + args.dst);
}
}
final Path sorted = new Path(jobDirectory, "_distcp_sorted");
checkDuplication(jobfs, dstfilelist, sorted, conf);
if (dststatus != null && args.flags.contains(Options.DELETE)) {
long deletedPathsCount = deleteNonexisting(dstfs, dststatus, sorted,
jobfs, jobDirectory, jobConf, conf);
LOG.info("deletedPathsFromDestCount(files+directories)=" +
deletedPathsCount);
}
Path tmpDir = new Path(
(dstExists && !dstIsDir) || (!dstExists && srcCount == 1)?
args.dst.getParent(): args.dst, "_distcp_tmp_" + randomId);
jobConf.set(TMP_DIR_LABEL, tmpDir.toUri().toString());
// Explicitly create the tmpDir to ensure that it can be cleaned
// up by fullyDelete() later.
tmpDir.getFileSystem(conf).mkdirs(tmpDir);
LOG.info("sourcePathsCount=" + srcCount);
LOG.info("filesToCopyCount=" + fileCount);
LOG.info("bytesToCopyCount=" +
TraditionalBinaryPrefix.long2String(byteCount, "", 1));
jobConf.setInt(SRC_COUNT_LABEL, srcCount);
jobConf.setLong(TOTAL_SIZE_LABEL, byteCount);
return (fileCount + dirCount) > 0;
}
/**
* Check whether the contents of src and dst are the same.
*
* Return false if dstpath does not exist
*
* If the files have different sizes, return false.
*
* If the files have the same sizes, the file checksums will be compared.
*
   * When file checksums are not supported by either file system,
   * the two files are considered the same if they have the same size.
*/
static private boolean sameFile(FileSystem srcfs, FileStatus srcstatus,
FileSystem dstfs, Path dstpath, boolean skipCRCCheck) throws IOException {
FileStatus dststatus;
try {
dststatus = dstfs.getFileStatus(dstpath);
} catch(FileNotFoundException fnfe) {
return false;
}
//same length?
if (srcstatus.getLen() != dststatus.getLen()) {
return false;
}
if (skipCRCCheck) {
LOG.debug("Skipping the CRC check");
return true;
}
//get src checksum
final FileChecksum srccs;
try {
srccs = srcfs.getFileChecksum(srcstatus.getPath());
} catch(FileNotFoundException fnfe) {
/*
* Two possible cases:
* (1) src existed once but was deleted between the time period that
* srcstatus was obtained and the try block above.
* (2) srcfs does not support file checksum and (incorrectly) throws
* FNFE, e.g. some previous versions of HftpFileSystem.
* For case (1), it is okay to return true since src was already deleted.
* For case (2), true should be returned.
*/
return true;
}
//compare checksums
try {
final FileChecksum dstcs = dstfs.getFileChecksum(dststatus.getPath());
//return true if checksum is not supported
//(i.e. some of the checksums is null)
return srccs == null || dstcs == null || srccs.equals(dstcs);
} catch(FileNotFoundException fnfe) {
return false;
}
}
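  // Illustrative note (not part of the original source) on sameFile()'s outcomes: a
  // missing dst or a length mismatch returns false; equal lengths with -skipcrccheck
  // return true immediately; otherwise the checksums decide, and a null checksum on
  // either side (a filesystem without checksum support) counts as a match.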
/**
* Delete the dst files/dirs which do not exist in src
*
* @return total count of files and directories deleted from destination
* @throws IOException
*/
static private long deleteNonexisting(
FileSystem dstfs, FileStatus dstroot, Path dstsorted,
FileSystem jobfs, Path jobdir, JobConf jobconf, Configuration conf
) throws IOException {
if (dstroot.isFile()) {
throw new IOException("dst must be a directory when option "
+ Options.DELETE.cmd + " is set, but dst (= " + dstroot.getPath()
+ ") is not a directory.");
}
//write dst lsr results
final Path dstlsr = new Path(jobdir, "_distcp_dst_lsr");
try (final SequenceFile.Writer writer = SequenceFile.createWriter(jobconf,
Writer.file(dstlsr), Writer.keyClass(Text.class),
Writer.valueClass(NullWritable.class), Writer.compression(
SequenceFile.CompressionType.NONE))) {
//do lsr to get all file statuses in dstroot
final Stack<FileStatus> lsrstack = new Stack<FileStatus>();
for(lsrstack.push(dstroot); !lsrstack.isEmpty(); ) {
final FileStatus status = lsrstack.pop();
if (status.isDirectory()) {
for(FileStatus child : dstfs.listStatus(status.getPath())) {
String relative = makeRelative(dstroot.getPath(), child.getPath());
writer.append(new Text(relative), NullWritable.get());
lsrstack.push(child);
}
}
}
}
//sort lsr results
final Path sortedlsr = new Path(jobdir, "_distcp_dst_lsr_sorted");
SequenceFile.Sorter sorter = new SequenceFile.Sorter(jobfs,
new Text.Comparator(), Text.class, NullWritable.class, jobconf);
sorter.sort(dstlsr, sortedlsr);
//compare lsr list and dst list
long deletedPathsCount = 0;
try (SequenceFile.Reader lsrin =
new SequenceFile.Reader(jobconf, Reader.file(sortedlsr));
SequenceFile.Reader dstin =
new SequenceFile.Reader(jobconf, Reader.file(dstsorted))) {
//compare sorted lsr list and sorted dst list
final Text lsrpath = new Text();
final Text dstpath = new Text();
final Text dstfrom = new Text();
final Trash trash = new Trash(dstfs, conf);
Path lastpath = null;
boolean hasnext = dstin.next(dstpath, dstfrom);
while (lsrin.next(lsrpath, NullWritable.get())) {
int dst_cmp_lsr = dstpath.compareTo(lsrpath);
while (hasnext && dst_cmp_lsr < 0) {
hasnext = dstin.next(dstpath, dstfrom);
dst_cmp_lsr = dstpath.compareTo(lsrpath);
}
if (dst_cmp_lsr == 0) {
//lsrpath exists in dst, skip it
hasnext = dstin.next(dstpath, dstfrom);
} else {
//lsrpath does not exist, delete it
final Path rmpath = new Path(dstroot.getPath(), lsrpath.toString());
++deletedPathsCount;
if ((lastpath == null || !isAncestorPath(lastpath, rmpath))) {
if (!(trash.moveToTrash(rmpath) || dstfs.delete(rmpath, true))) {
throw new IOException("Failed to delete " + rmpath);
}
lastpath = rmpath;
}
}
}
}
return deletedPathsCount;
}
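  // Illustrative sketch (not part of the original source) of the -delete pass above: the
  // recursive listing of the destination is sorted and merged against the sorted list of
  // paths the copy will produce. If the copy list holds {a, b} while the destination
  // already holds {a, b, c}, only c is moved to trash (or deleted), and descendants of an
  // already-deleted directory are skipped via isAncestorPath().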
//is x an ancestor path of y?
static private boolean isAncestorPath(Path xp, Path yp) {
final String x = xp.toString();
final String y = yp.toString();
if (!y.startsWith(x)) {
return false;
}
final int len = x.length();
return y.length() == len || y.charAt(len) == Path.SEPARATOR_CHAR;
}
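  // Illustrative note (not part of the original source):
  //   isAncestorPath(new Path("/a/b"), new Path("/a/b/c")) -> true
  //   isAncestorPath(new Path("/a/b"), new Path("/a/b"))   -> true
  //   isAncestorPath(new Path("/a/b"), new Path("/a/bc"))  -> false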
  /** Check whether the file list has duplicates. */
static private void checkDuplication(FileSystem fs, Path file, Path sorted,
Configuration conf) throws IOException {
SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs,
new Text.Comparator(), Text.class, Text.class, conf);
sorter.sort(file, sorted);
try (SequenceFile.Reader in =
new SequenceFile.Reader(conf, Reader.file(sorted))) {
Text prevdst = null, curdst = new Text();
Text prevsrc = null, cursrc = new Text();
for(; in.next(curdst, cursrc); ) {
if (prevdst != null && curdst.equals(prevdst)) {
throw new DuplicationException(
"Invalid input, there are duplicated files in the sources: "
+ prevsrc + ", " + cursrc);
}
prevdst = curdst;
curdst = new Text();
prevsrc = cursrc;
cursrc = new Text();
}
}
}
/** An exception class for duplicated source files. */
public static class DuplicationException extends IOException {
private static final long serialVersionUID = 1L;
/** Error code for this exception */
public static final int ERROR_CODE = -2;
DuplicationException(String message) {super(message);}
}
}
| 63,413 | 36.859104 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.Random;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.DeprecationDelta;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.mapred.lib.LongSumReducer;
import org.apache.hadoop.mapreduce.lib.map.RegexMapper;
/**
* Logalyzer: A utility tool for archiving and analyzing hadoop logs.
* <p>
 * This tool supports archiving and analyzing (sort/grep) of log-files.
 * It takes as input
 *  a) an input uri which serves the uris of the logs to be archived,
 *  b) an output directory (optional),
 *  c) a directory on dfs in which to archive the logs, and
 *  d) the sort/grep patterns for analyzing the files and a separator for boundaries.
* Usage:
* Logalyzer -archive -archiveDir <directory to archive logs> -analysis
* <directory> -logs <log-list uri> -grep <pattern> -sort
* <col1, col2> -separator <separator>
* <p>
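 * A minimal programmatic sketch (the paths, grep pattern, and sort columns
 * below are hypothetical):
 * <pre>
 * Logalyzer logalyzer = new Logalyzer();
 * logalyzer.doArchive("hdfs:///tmp/log-list.txt", "/archive/logs");
 * logalyzer.doAnalyze("/archive/logs", "/analysis/out",
 *     "ERROR.*", "0,1", " ");
 * </pre>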
*/
@Deprecated
public class Logalyzer {
// Constants
private static Configuration fsConfig = new Configuration();
public static final String SORT_COLUMNS =
"logalizer.logcomparator.sort.columns";
public static final String COLUMN_SEPARATOR =
"logalizer.logcomparator.column.separator";
static {
Configuration.addDeprecations(new DeprecationDelta[] {
new DeprecationDelta("mapred.reducer.sort", SORT_COLUMNS),
new DeprecationDelta("mapred.reducer.separator", COLUMN_SEPARATOR)
});
}
/** A {@link Mapper} that extracts text matching a regular expression. */
public static class LogRegexMapper<K extends WritableComparable>
extends MapReduceBase
implements Mapper<K, Text, Text, LongWritable> {
private Pattern pattern;
public void configure(JobConf job) {
pattern = Pattern.compile(job.get(RegexMapper.PATTERN));
}
public void map(K key, Text value,
OutputCollector<Text, LongWritable> output,
Reporter reporter)
throws IOException {
String text = value.toString();
Matcher matcher = pattern.matcher(text);
while (matcher.find()) {
output.collect(value, new LongWritable(1));
}
}
}
/** A WritableComparator optimized for UTF8 keys of the logs. */
public static class LogComparator extends Text.Comparator implements Configurable {
private static Log LOG = LogFactory.getLog(Logalyzer.class);
private JobConf conf = null;
private String[] sortSpec = null;
private String columnSeparator = null;
public void setConf(Configuration conf) {
if (conf instanceof JobConf) {
this.conf = (JobConf) conf;
} else {
this.conf = new JobConf(conf);
}
      //Initialize the specification for *comparison*
String sortColumns = this.conf.get(SORT_COLUMNS, null);
if (sortColumns != null) {
sortSpec = sortColumns.split(",");
}
//Column-separator
columnSeparator = this.conf.get(COLUMN_SEPARATOR, "");
}
public Configuration getConf() {
return conf;
}
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
if (sortSpec == null) {
return super.compare(b1, s1, l1, b2, s2, l2);
}
try {
Text logline1 = new Text();
logline1.readFields(new DataInputStream(new ByteArrayInputStream(b1, s1, l1)));
String line1 = logline1.toString();
String[] logColumns1 = line1.split(columnSeparator);
Text logline2 = new Text();
logline2.readFields(new DataInputStream(new ByteArrayInputStream(b2, s2, l2)));
String line2 = logline2.toString();
String[] logColumns2 = line2.split(columnSeparator);
if (logColumns1 == null || logColumns2 == null) {
return super.compare(b1, s1, l1, b2, s2, l2);
}
//Compare column-wise according to *sortSpec*
for(int i=0; i < sortSpec.length; ++i) {
int column = Integer.parseInt(sortSpec[i]);
String c1 = logColumns1[column];
String c2 = logColumns2[column];
//Compare columns
          byte[] cb1 = c1.getBytes(Charset.forName("UTF-8"));
          byte[] cb2 = c2.getBytes(Charset.forName("UTF-8"));
          int comparison = super.compareBytes(cb1, 0, cb1.length,
                                              cb2, 0, cb2.length);
          //They differ!
          if (comparison != 0) {
            return comparison;
          }
}
} catch (IOException ioe) {
LOG.fatal("Caught " + ioe);
return 0;
}
return 0;
}
static {
// register this comparator
WritableComparator.define(Text.class, new LogComparator());
}
}
/**
* doArchive: Workhorse function to archive log-files.
* @param logListURI : The uri which will serve list of log-files to archive.
* @param archiveDirectory : The directory to store archived logfiles.
* @throws IOException
*/
@SuppressWarnings("deprecation")
public void
doArchive(String logListURI, String archiveDirectory)
throws IOException
{
String destURL = FileSystem.getDefaultUri(fsConfig) + archiveDirectory;
DistCpV1.copy(new JobConf(fsConfig), logListURI, destURL, null, true, false);
}
/**
* doAnalyze:
* @param inputFilesDirectory : Directory containing the files to be analyzed.
* @param outputDirectory : Directory to store analysis (output).
* @param grepPattern : Pattern to *grep* for.
* @param sortColumns : Sort specification for output.
* @param columnSeparator : Column separator.
* @throws IOException
*/
public void
doAnalyze(String inputFilesDirectory, String outputDirectory,
String grepPattern, String sortColumns, String columnSeparator)
throws IOException
{
Path grepInput = new Path(inputFilesDirectory);
Path analysisOutput = null;
if (outputDirectory.equals("")) {
analysisOutput = new Path(inputFilesDirectory, "logalyzer_" +
Integer.toString(new Random().nextInt(Integer.MAX_VALUE)));
} else {
analysisOutput = new Path(outputDirectory);
}
JobConf grepJob = new JobConf(fsConfig);
grepJob.setJobName("logalyzer-grep-sort");
FileInputFormat.setInputPaths(grepJob, grepInput);
grepJob.setInputFormat(TextInputFormat.class);
grepJob.setMapperClass(LogRegexMapper.class);
grepJob.set(RegexMapper.PATTERN, grepPattern);
grepJob.set(SORT_COLUMNS, sortColumns);
grepJob.set(COLUMN_SEPARATOR, columnSeparator);
grepJob.setCombinerClass(LongSumReducer.class);
grepJob.setReducerClass(LongSumReducer.class);
FileOutputFormat.setOutputPath(grepJob, analysisOutput);
grepJob.setOutputFormat(TextOutputFormat.class);
grepJob.setOutputKeyClass(Text.class);
grepJob.setOutputValueClass(LongWritable.class);
grepJob.setOutputKeyComparatorClass(LogComparator.class);
grepJob.setNumReduceTasks(1); // write a single file
JobClient.runJob(grepJob);
}
public static void main(String[] args) {
Log LOG = LogFactory.getLog(Logalyzer.class);
String version = "Logalyzer.0.0.1";
String usage = "Usage: Logalyzer [-archive -logs <urlsFile>] " +
"-archiveDir <archiveDirectory> " +
"-grep <pattern> -sort <column1,column2,...> -separator <separator> " +
"-analysis <outputDirectory>";
System.out.println(version);
if (args.length == 0) {
System.err.println(usage);
System.exit(-1);
}
//Command line arguments
boolean archive = false;
boolean grep = false;
boolean sort = false;
String archiveDir = "";
String logListURI = "";
String grepPattern = ".*";
String sortColumns = "";
String columnSeparator = " ";
String outputDirectory = "";
for (int i = 0; i < args.length; i++) { // parse command line
if (args[i].equals("-archive")) {
archive = true;
} else if (args[i].equals("-archiveDir")) {
archiveDir = args[++i];
} else if (args[i].equals("-grep")) {
grep = true;
grepPattern = args[++i];
} else if (args[i].equals("-logs")) {
logListURI = args[++i];
} else if (args[i].equals("-sort")) {
sort = true;
sortColumns = args[++i];
} else if (args[i].equals("-separator")) {
columnSeparator = args[++i];
} else if (args[i].equals("-analysis")) {
outputDirectory = args[++i];
}
}
LOG.info("analysisDir = " + outputDirectory);
LOG.info("archiveDir = " + archiveDir);
LOG.info("logListURI = " + logListURI);
LOG.info("grepPattern = " + grepPattern);
LOG.info("sortColumns = " + sortColumns);
LOG.info("separator = " + columnSeparator);
try {
Logalyzer logalyzer = new Logalyzer();
// Archive?
if (archive) {
logalyzer.doArchive(logListURI, archiveDir);
}
// Analyze?
if (grep || sort) {
logalyzer.doAnalyze(archiveDir, outputDirectory, grepPattern, sortColumns, columnSeparator);
}
} catch (IOException ioe) {
ioe.printStackTrace();
System.exit(-1);
}
} //main
} //class Logalyzer
| 11,302 | 33.251515 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/mapred/tools/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Command-line tools associated with the org.apache.hadoop.mapred package.
*/
package org.apache.hadoop.mapred.tools;
| 931 | 39.521739 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/mapred/tools/GetGroups.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.tools;
import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.GetGroupsBase;
import org.apache.hadoop.util.ToolRunner;
/**
* MR implementation of a tool for getting the groups which a given user
* belongs to.
*/
public class GetGroups extends GetGroupsBase {
static {
Configuration.addDefaultResource("mapred-default.xml");
Configuration.addDefaultResource("mapred-site.xml");
}
GetGroups(Configuration conf) {
super(conf);
}
GetGroups(Configuration conf, PrintStream out) {
super(conf, out);
}
@Override
protected InetSocketAddress getProtocolAddress(Configuration conf)
throws IOException {
throw new UnsupportedOperationException();
}
public static void main(String[] argv) throws Exception {
int res = ToolRunner.run(new GetGroups(new Configuration()), argv);
System.exit(res);
}
}
| 1,809 | 30.206897 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/test/java/org/apache/hadoop/tools/rumen/ConcatenatedInputFilesDemuxer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
public class ConcatenatedInputFilesDemuxer implements InputDemuxer {
private String name;
private DelimitedInputStream input;
private String knownNextFileName = null;
static private int MAXIMUM_HEADER_LINE_LENGTH = 500;
@Override
public void bindTo(Path path, Configuration conf) throws IOException {
InputStream underlyingInput = null;
if (name != null) { // re-binding before the previous one was consumed.
close();
}
name = path.getName();
underlyingInput = new PossiblyDecompressedInputStream(path, conf);
input =
new DelimitedInputStream(new BufferedInputStream(underlyingInput),
"\f!!FILE=", "!!\n");
knownNextFileName = input.nextFileName();
if (knownNextFileName == null) {
close();
return;
}
/*
* We handle files in specialized formats by trying their demuxers first,
* not by failing here.
*/
return;
}
@Override
public Pair<String, InputStream> getNext() throws IOException {
if (knownNextFileName != null) {
Pair<String, InputStream> result =
new Pair<String, InputStream>(knownNextFileName, input);
knownNextFileName = null;
return result;
}
String nextFileName = input.nextFileName();
if (nextFileName == null) {
return null;
}
return new Pair<String, InputStream>(nextFileName, input);
}
@Override
public void close() throws IOException {
if (input != null) {
input.close();
}
}
/**
   * A simple wrapper class to make any input stream delimited. It has an extra
   * method, nextFileName.
*
* The input stream should have lines that look like
* <marker><filename><endmarker> . The text <marker> should not occur
* elsewhere in the file. The text <endmarker> should not occur in a file
* name.
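   *
   * A minimal construction sketch, mirroring how this demuxer's bindTo sets
   * the stream up (the underlying input source is hypothetical):
   * <pre>
   * InputStream raw = ..; // assume a concatenated input stream here
   * DelimitedInputStream in =
   *     new DelimitedInputStream(new BufferedInputStream(raw),
   *         "\f!!FILE=", "!!\n");
   * String firstFile = in.nextFileName(); // null if no header line is found
   * </pre>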
*/
static class DelimitedInputStream extends InputStream {
private InputStream input;
private boolean endSeen = false;
private final String fileMarker;
private final byte[] markerBytes;
private final byte[] fileMarkerBuffer;
private final String fileEndMarker;
private final byte[] endMarkerBytes;
private final byte[] fileEndMarkerBuffer;
/**
* Constructor.
*
* @param input
*/
public DelimitedInputStream(InputStream input, String fileMarker,
String fileEndMarker) {
this.input = new BufferedInputStream(input, 10000);
this.input.mark(10000);
this.fileMarker = fileMarker;
this.markerBytes = this.fileMarker.getBytes();
this.fileMarkerBuffer = new byte[this.markerBytes.length];
this.fileEndMarker = fileEndMarker;
this.endMarkerBytes = this.fileEndMarker.getBytes();
this.fileEndMarkerBuffer = new byte[this.endMarkerBytes.length];
}
@Override
public int read() throws IOException {
if (endSeen) {
return -1;
}
input.mark(10000);
int result = input.read();
if (result < 0) {
endSeen = true;
return result;
}
if (result == markerBytes[0]) {
input.reset();
// this might be a marker line
int markerReadResult =
input.read(fileMarkerBuffer, 0, fileMarkerBuffer.length);
input.reset();
if (markerReadResult < fileMarkerBuffer.length
|| !fileMarker.equals(new String(fileMarkerBuffer))) {
return input.read();
}
return -1;
}
return result;
}
/*
* (non-Javadoc)
*
* @see java.io.InputStream#read(byte[], int, int)
*
* This does SLIGHTLY THE WRONG THING.
*
* If we run off the end of the segment then the input buffer will be
* dirtied beyond the point where we claim to have read. If this turns out
* to be a problem, save that data somewhere and restore it if needed.
*/
@Override
public int read(byte[] buffer, int offset, int length) throws IOException {
if (endSeen) {
return -1;
}
input.mark(length + markerBytes.length + 10);
int dataSeen = input.read(buffer, offset, length);
byte[] extraReadBuffer = null;
int extraActualRead = -1;
// search for an instance of a file marker
for (int i = offset; i < offset + dataSeen; ++i) {
if (buffer[i] == markerBytes[0]) {
boolean mismatch = false;
for (int j = 1; j < Math.min(markerBytes.length, offset + dataSeen
- i); ++j) {
if (buffer[i + j] != markerBytes[j]) {
mismatch = true;
break;
}
}
if (!mismatch) {
// see if we have only a prefix of the markerBytes
int uncheckedMarkerCharCount =
markerBytes.length - (offset + dataSeen - i);
if (uncheckedMarkerCharCount > 0) {
if (extraReadBuffer == null) {
extraReadBuffer = new byte[markerBytes.length - 1];
extraActualRead = input.read(extraReadBuffer);
}
if (extraActualRead < uncheckedMarkerCharCount) {
input.reset();
return input.read(buffer, offset, length);
}
for (int j = 0; j < uncheckedMarkerCharCount; ++j) {
if (extraReadBuffer[j] != markerBytes[markerBytes.length
- uncheckedMarkerCharCount + j]) {
input.reset();
return input.read(buffer, offset, length);
}
}
}
input.reset();
if (i == offset) {
return -1;
}
int result = input.read(buffer, offset, i - offset);
return result;
}
}
}
return dataSeen;
}
@Override
public int read(byte[] buffer) throws IOException {
return read(buffer, 0, buffer.length);
}
@Override
public void close() throws IOException {
if (endSeen) {
input.close();
}
}
String nextFileName() throws IOException {
return nextFileName(MAXIMUM_HEADER_LINE_LENGTH);
}
private String nextFileName(int bufferSize) throws IOException {
// the line can't contain a newline and must contain a form feed
byte[] buffer = new byte[bufferSize];
input.mark(bufferSize + 1);
int actualRead = input.read(buffer);
int mostRecentRead = actualRead;
while (actualRead < bufferSize && mostRecentRead > 0) {
mostRecentRead =
input.read(buffer, actualRead, bufferSize - actualRead);
if (mostRecentRead > 0) {
actualRead += mostRecentRead;
}
}
if (actualRead < markerBytes.length) {
input.reset();
return null;
}
for (int i = 0; i < markerBytes.length; ++i) {
if (markerBytes[i] != buffer[i]) {
input.reset();
return null;
}
}
for (int i = markerBytes.length; i < actualRead; ++i) {
if (buffer[i] == endMarkerBytes[0]) {
// gather the file name
input.reset();
// burn the marker
if (input.read(buffer, 0, markerBytes.length) < markerBytes.length) {
throw new IOException("Can't reread bytes I've read before.");
}
// get the file name
if (input.read(buffer, 0, i - markerBytes.length) < i
- markerBytes.length) {
throw new IOException("Can't reread bytes I've read before.");
}
// burn the two exclamation points and the newline
if (input.read(fileEndMarkerBuffer) < fileEndMarkerBuffer.length) {
input.reset();
return null;
}
for (int j = 0; j < endMarkerBytes.length; ++j) {
if (endMarkerBytes[j] != fileEndMarkerBuffer[j]) {
input.reset();
return null;
}
}
return new String(buffer, 0, i - markerBytes.length);
}
if (buffer[i] == '\n') {
return null;
}
}
// we ran off the end. Was the buffer too short, or is this all there was?
input.reset();
if (actualRead < bufferSize) {
return null;
}
return nextFileName(bufferSize * 2);
}
}
}
| 9,380 | 26.672566 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/test/java/org/apache/hadoop/tools/rumen/TestHistograms.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.codehaus.jackson.JsonEncoding;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.map.ObjectMapper;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
@Ignore
public class TestHistograms {
/**
* @throws IOException
*
   * There should be files in the directory named by
   * ${test.tools.input.dir}/rumen/histogram-tests .
*
* There will be pairs of files, inputXxx.json and goldXxx.json .
*
* We read the input file as a HistogramRawTestData in json. Then we
* create a Histogram using the data field, and then a
* LoggedDiscreteCDF using the percentiles and scale field. Finally,
* we read the corresponding goldXxx.json as a LoggedDiscreteCDF and
* deepCompare them.
*/
@Test
public void testHistograms() throws IOException {
final Configuration conf = new Configuration();
final FileSystem lfs = FileSystem.getLocal(conf);
final Path rootInputDir = new Path(
System.getProperty("test.tools.input.dir", "")).makeQualified(lfs);
final Path rootInputFile = new Path(rootInputDir, "rumen/histogram-tests");
FileStatus[] tests = lfs.listStatus(rootInputFile);
for (int i = 0; i < tests.length; ++i) {
Path filePath = tests[i].getPath();
String fileName = filePath.getName();
if (fileName.startsWith("input")) {
String testName = fileName.substring("input".length());
Path goldFilePath = new Path(rootInputFile, "gold"+testName);
assertTrue("Gold file dies not exist", lfs.exists(goldFilePath));
LoggedDiscreteCDF newResult = histogramFileToCDF(filePath, lfs);
System.out.println("Testing a Histogram for " + fileName);
FSDataInputStream goldStream = lfs.open(goldFilePath);
JsonObjectMapperParser<LoggedDiscreteCDF> parser = new JsonObjectMapperParser<LoggedDiscreteCDF>(
goldStream, LoggedDiscreteCDF.class);
try {
LoggedDiscreteCDF dcdf = parser.getNext();
dcdf.deepCompare(newResult, new TreePath(null, "<root>"));
} catch (DeepInequalityException e) {
fail(e.path.toString());
}
finally {
parser.close();
}
}
}
}
private static LoggedDiscreteCDF histogramFileToCDF(Path path, FileSystem fs)
throws IOException {
FSDataInputStream dataStream = fs.open(path);
JsonObjectMapperParser<HistogramRawTestData> parser = new JsonObjectMapperParser<HistogramRawTestData>(
dataStream, HistogramRawTestData.class);
HistogramRawTestData data;
try {
data = parser.getNext();
} finally {
parser.close();
}
Histogram hist = new Histogram();
List<Long> measurements = data.getData();
List<Long> typeProbeData = new HistogramRawTestData().getData();
assertTrue(
"The data attribute of a jackson-reconstructed HistogramRawTestData "
+ " should be a " + typeProbeData.getClass().getName()
+ ", like a virgin HistogramRawTestData, but it's a "
+ measurements.getClass().getName(),
measurements.getClass() == typeProbeData.getClass());
for (int j = 0; j < measurements.size(); ++j) {
hist.enter(measurements.get(j));
}
LoggedDiscreteCDF result = new LoggedDiscreteCDF();
int[] percentiles = new int[data.getPercentiles().size()];
for (int j = 0; j < data.getPercentiles().size(); ++j) {
percentiles[j] = data.getPercentiles().get(j);
}
result.setCDF(hist, percentiles, data.getScale());
return result;
}
public static void main(String[] args) throws IOException {
final Configuration conf = new Configuration();
final FileSystem lfs = FileSystem.getLocal(conf);
for (String arg : args) {
Path filePath = new Path(arg).makeQualified(lfs);
String fileName = filePath.getName();
if (fileName.startsWith("input")) {
LoggedDiscreteCDF newResult = histogramFileToCDF(filePath, lfs);
String testName = fileName.substring("input".length());
Path goldFilePath = new Path(filePath.getParent(), "gold"+testName);
ObjectMapper mapper = new ObjectMapper();
JsonFactory factory = mapper.getJsonFactory();
FSDataOutputStream ostream = lfs.create(goldFilePath, true);
JsonGenerator gen = factory.createJsonGenerator(ostream,
JsonEncoding.UTF8);
gen.useDefaultPrettyPrinter();
gen.writeObject(newResult);
gen.close();
} else {
System.err.println("Input file not started with \"input\". File "+fileName+" skipped.");
}
}
}
}
| 5,929 | 36.770701 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/test/java/org/apache/hadoop/tools/rumen/HistogramRawTestData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.ArrayList;
import java.util.List;
class HistogramRawTestData {
List<Long> data = new ArrayList<Long>();
List<Integer> percentiles = new ArrayList<Integer>();
int scale;
public List<Integer> getPercentiles() {
return percentiles;
}
public void setPercentiles(List<Integer> percentiles) {
this.percentiles = percentiles;
}
public int getScale() {
return scale;
}
public void setScale(int scale) {
this.scale = scale;
}
public List<Long> getData() {
return data;
}
public void setData(List<Long> data) {
this.data = data;
}
}
| 1,451 | 25.4 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/test/java/org/apache/hadoop/tools/rumen/TestRandomSeedGenerator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.apache.hadoop.tools.rumen.RandomSeedGenerator.getSeed;
public class TestRandomSeedGenerator {
@Test
public void testSeedGeneration() {
long masterSeed1 = 42;
long masterSeed2 = 43;
assertTrue("Deterministic seeding",
getSeed("stream1", masterSeed1) == getSeed("stream1", masterSeed1));
assertTrue("Deterministic seeding",
getSeed("stream2", masterSeed2) == getSeed("stream2", masterSeed2));
assertTrue("Different streams",
getSeed("stream1", masterSeed1) != getSeed("stream2", masterSeed1));
assertTrue("Different master seeds",
getSeed("stream1", masterSeed1) != getSeed("stream1", masterSeed2));
}
}
| 1,594 | 37.902439 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/test/java/org/apache/hadoop/tools/rumen/TestPiecewiseLinearInterpolation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.ArrayList;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestPiecewiseLinearInterpolation {
static private double maximumRelativeError = 0.002D;
static private LoggedSingleRelativeRanking makeRR(double ranking, long datum) {
LoggedSingleRelativeRanking result = new LoggedSingleRelativeRanking();
result.setDatum(datum);
result.setRelativeRanking(ranking);
return result;
}
@Test
public void testOneRun() {
LoggedDiscreteCDF input = new LoggedDiscreteCDF();
input.setMinimum(100000L);
input.setMaximum(1100000L);
ArrayList<LoggedSingleRelativeRanking> rankings = new ArrayList<LoggedSingleRelativeRanking>();
rankings.add(makeRR(0.1, 200000L));
rankings.add(makeRR(0.5, 800000L));
rankings.add(makeRR(0.9, 1000000L));
input.setRankings(rankings);
input.setNumberValues(3);
CDFRandomGenerator gen = new CDFPiecewiseLinearRandomGenerator(input);
Histogram values = new Histogram();
for (int i = 0; i < 1000000; ++i) {
long value = gen.randomValue();
values.enter(value);
}
/*
     * Now we build a percentile CDF, and compute the sum of the squares of the
     * actual percentiles vs. the predicted percentiles
*/
int[] percentiles = new int[99];
for (int i = 0; i < 99; ++i) {
percentiles[i] = i + 1;
}
long[] result = values.getCDF(100, percentiles);
long sumErrorSquares = 0L;
for (int i = 0; i < 10; ++i) {
long error = result[i] - (10000L * i + 100000L);
System.out.println("element " + i + ", got " + result[i] + ", expected "
+ (10000L * i + 100000L) + ", error = " + error);
sumErrorSquares += error * error;
}
for (int i = 10; i < 50; ++i) {
long error = result[i] - (15000L * i + 50000L);
System.out.println("element " + i + ", got " + result[i] + ", expected "
+ (15000L * i + 50000L) + ", error = " + error);
sumErrorSquares += error * error;
}
for (int i = 50; i < 90; ++i) {
long error = result[i] - (5000L * i + 550000L);
System.out.println("element " + i + ", got " + result[i] + ", expected "
+ (5000L * i + 550000L) + ", error = " + error);
sumErrorSquares += error * error;
}
for (int i = 90; i <= 100; ++i) {
long error = result[i] - (10000L * i + 100000L);
System.out.println("element " + i + ", got " + result[i] + ", expected "
+ (10000L * i + 100000L) + ", error = " + error);
sumErrorSquares += error * error;
}
// normalize the error
double realSumErrorSquares = (double) sumErrorSquares;
double normalizedError = realSumErrorSquares / 100
/ rankings.get(1).getDatum() / rankings.get(1).getDatum();
double RMSNormalizedError = Math.sqrt(normalizedError);
System.out.println("sumErrorSquares = " + sumErrorSquares);
System.out.println("normalizedError: " + normalizedError
+ ", RMSNormalizedError: " + RMSNormalizedError);
System.out.println("Cumulative error is " + RMSNormalizedError);
assertTrue("The RMS relative error per bucket, " + RMSNormalizedError
+ ", exceeds our tolerance of " + maximumRelativeError,
RMSNormalizedError <= maximumRelativeError);
}
}
| 4,140 | 32.395161 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedDiscreteCDF.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.ArrayList;
import java.util.List;
/**
* A {@link LoggedDiscreteCDF} is a discrete approximation of a cumulative
* distribution function, with this class set up to meet the requirements of the
* Jackson JSON parser/generator.
*
* All of the public methods are simply accessors for the instance variables we
* want to write out in the JSON files.
*
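 * A minimal construction sketch (the sample measurements and percentile
 * steps below are hypothetical):
 * <pre>
 * Histogram hist = new Histogram();
 * hist.enter(100L);
 * hist.enter(200L);
 * hist.enter(300L);
 * LoggedDiscreteCDF cdf = new LoggedDiscreteCDF();
 * cdf.setCDF(hist, new int[] { 25, 50, 75 }, 100);
 * </pre>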
*/
public class LoggedDiscreteCDF implements DeepCompare {
/**
* The number of values this CDF is built on
*/
long numberValues = -1L;
/**
* The least {@code X} value
*/
long minimum = Long.MIN_VALUE;
/**
* The coordinates of the bulk of the CDF
*/
List<LoggedSingleRelativeRanking> rankings = new ArrayList<LoggedSingleRelativeRanking>();
/**
* The greatest {@code X} value
*/
long maximum = Long.MAX_VALUE;
void setCDF(Histogram data, int[] steps, int modulus) {
numberValues = data.getTotalCount();
long[] CDF = data.getCDF(modulus, steps);
if (CDF != null) {
minimum = CDF[0];
maximum = CDF[CDF.length - 1];
rankings = new ArrayList<LoggedSingleRelativeRanking>();
for (int i = 1; i < CDF.length - 1; ++i) {
LoggedSingleRelativeRanking srr = new LoggedSingleRelativeRanking();
srr.setRelativeRanking(((double) steps[i - 1]) / modulus);
srr.setDatum(CDF[i]);
rankings.add(srr);
}
}
}
public long getMinimum() {
return minimum;
}
void setMinimum(long minimum) {
this.minimum = minimum;
}
public List<LoggedSingleRelativeRanking> getRankings() {
return rankings;
}
void setRankings(List<LoggedSingleRelativeRanking> rankings) {
this.rankings = rankings;
}
public long getMaximum() {
return maximum;
}
void setMaximum(long maximum) {
this.maximum = maximum;
}
public long getNumberValues() {
return numberValues;
}
void setNumberValues(long numberValues) {
this.numberValues = numberValues;
}
private void compare1(long c1, long c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 != c2) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
}
private void compare1(List<LoggedSingleRelativeRanking> c1,
List<LoggedSingleRelativeRanking> c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
if (c1 == null || c2 == null || c1.size() != c2.size()) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
for (int i = 0; i < c1.size(); ++i) {
c1.get(i).deepCompare(c2.get(i), new TreePath(loc, eltname, i));
}
}
public void deepCompare(DeepCompare comparand, TreePath loc)
throws DeepInequalityException {
if (!(comparand instanceof LoggedDiscreteCDF)) {
throw new DeepInequalityException("comparand has wrong type", loc);
}
LoggedDiscreteCDF other = (LoggedDiscreteCDF) comparand;
compare1(numberValues, other.numberValues, loc, "numberValues");
compare1(minimum, other.minimum, loc, "minimum");
compare1(maximum, other.maximum, loc, "maximum");
compare1(rankings, other.rankings, loc, "rankings");
}
}
| 4,136 | 27.531034 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JsonObjectMapperWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.Closeable;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.mapreduce.ID;
import org.apache.hadoop.tools.rumen.datatypes.DataType;
import org.apache.hadoop.tools.rumen.serializers.DefaultRumenSerializer;
import org.apache.hadoop.tools.rumen.serializers.ObjectStringSerializer;
import org.codehaus.jackson.JsonEncoding;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.Version;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.SerializationConfig;
import org.codehaus.jackson.map.module.SimpleModule;
/**
* Simple wrapper around {@link JsonGenerator} to write objects in JSON format.
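 *
 * A minimal usage sketch (the output path and element type below are
 * hypothetical):
 * <pre>
 * OutputStream out = new FileOutputStream("jobs.json");
 * JsonObjectMapperWriter<LoggedJob> writer =
 *     new JsonObjectMapperWriter<LoggedJob>(out, true);
 * LoggedJob job = ..; // assume a summarized job here
 * writer.write(job);
 * writer.close();
 * </pre>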
* @param <T> The type of the objects to be written.
*/
public class JsonObjectMapperWriter<T> implements Closeable {
private JsonGenerator writer;
public JsonObjectMapperWriter(OutputStream output, boolean prettyPrint) throws IOException {
ObjectMapper mapper = new ObjectMapper();
mapper.configure(
SerializationConfig.Feature.CAN_OVERRIDE_ACCESS_MODIFIERS, true);
// define a module
SimpleModule module = new SimpleModule("Default Serializer",
new Version(0, 1, 1, "FINAL"));
// add various serializers to the module
// add default (all-pass) serializer for all rumen specific data types
module.addSerializer(DataType.class, new DefaultRumenSerializer());
// add a serializer to use object.toString() while serializing
module.addSerializer(ID.class, new ObjectStringSerializer<ID>());
// register the module with the object-mapper
mapper.registerModule(module);
    writer = mapper.getJsonFactory().createJsonGenerator(
        output, JsonEncoding.UTF8);
if (prettyPrint) {
writer.useDefaultPrettyPrinter();
}
}
public void write(T object) throws IOException {
writer.writeObject(object);
}
@Override
public void close() throws IOException {
writer.close();
}
}
| 2,883 | 36.947368 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/MapAttempt20LineHistoryEventEmitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.text.ParseException;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.MapAttemptFinishedEvent;
public class MapAttempt20LineHistoryEventEmitter extends
TaskAttempt20LineEventEmitter {
static List<SingleEventEmitter> nonFinals =
new LinkedList<SingleEventEmitter>();
static List<SingleEventEmitter> finals = new LinkedList<SingleEventEmitter>();
static {
nonFinals.addAll(taskEventNonFinalSEEs);
finals.add(new MapAttemptFinishedEventEmitter());
}
protected MapAttempt20LineHistoryEventEmitter() {
super();
}
static private class MapAttemptFinishedEventEmitter extends
SingleEventEmitter {
HistoryEvent maybeEmitEvent(ParsedLine line, String taskAttemptIDName,
HistoryEventEmitter thatg) {
if (taskAttemptIDName == null) {
return null;
}
TaskAttemptID taskAttemptID = TaskAttemptID.forName(taskAttemptIDName);
String finishTime = line.get("FINISH_TIME");
String status = line.get("TASK_STATUS");
if (finishTime != null && status != null
&& status.equalsIgnoreCase("success")) {
String hostName = line.get("HOSTNAME");
String counters = line.get("COUNTERS");
String state = line.get("STATE_STRING");
MapAttempt20LineHistoryEventEmitter that =
(MapAttempt20LineHistoryEventEmitter) thatg;
if ("success".equalsIgnoreCase(status)) {
return new MapAttemptFinishedEvent
(taskAttemptID,
that.originalTaskType, status,
Long.parseLong(finishTime),
Long.parseLong(finishTime),
hostName, -1, null, state, maybeParseCounters(counters),
null);
}
}
return null;
}
}
@Override
List<SingleEventEmitter> finalSEEs() {
return finals;
}
@Override
List<SingleEventEmitter> nonFinalSEEs() {
return nonFinals;
}
}
| 3,011 | 30.705263 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/RewindableInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
/**
 * A simple wrapper class to make any input stream "rewindable". It could be
 * made more memory efficient by growing the internal buffer adaptively.
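 *
 * A minimal usage sketch (the input file below is hypothetical):
 * <pre>
 * RewindableInputStream ris =
 *     new RewindableInputStream(new FileInputStream("job-history.log"));
 * int first = ris.read();  // consume a few bytes, e.g. to sniff the format
 * ris.rewind();            // go back to the beginning of the stream
 * // subsequent reads start from the first byte again
 * </pre>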
*/
public class RewindableInputStream extends InputStream {
private InputStream input;
/**
* Constructor.
*
* @param input
*/
public RewindableInputStream(InputStream input) {
this(input, 1024 * 1024);
}
/**
* Constructor
*
* @param input
* input stream.
* @param maxBytesToRemember
* Maximum number of bytes we need to remember at the beginning of
* the stream. If {@link #rewind()} is called after so many bytes are
* read from the stream, {@link #rewind()} would fail.
*/
public RewindableInputStream(InputStream input, int maxBytesToRemember) {
this.input = new BufferedInputStream(input, maxBytesToRemember);
this.input.mark(maxBytesToRemember);
}
@Override
public int read() throws IOException {
return input.read();
}
@Override
public int read(byte[] buffer, int offset, int length) throws IOException {
return input.read(buffer, offset, length);
}
@Override
public void close() throws IOException {
input.close();
}
public InputStream rewind() throws IOException {
try {
input.reset();
return this;
} catch (IOException e) {
throw new IOException("Unable to rewind the stream", e);
}
}
}
| 2,361 | 28.898734 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobStory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.tools.rumen.Pre21JobHistoryConstants.Values;
/**
* {@link JobStory} represents the runtime information available for a
* completed Map-Reduce job.
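 *
 * A minimal consumer sketch (the story instance is assumed to be obtained
 * elsewhere, for example from a trace reader):
 * <pre>
 * JobStory story = ..; // assume a JobStory implementation here
 * int maps = story.getNumberMaps();
 * for (int i = 0; i < maps; ++i) {
 *   TaskInfo info = story.getTaskInfo(TaskType.MAP, i);
 *   // ... inspect the per-task requirements here
 * }
 * </pre>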
*/
public interface JobStory {
/**
* Get the {@link JobConf} for the job.
* @return the <code>JobConf</code> for the job
*/
public JobConf getJobConf();
/**
* Get the job name.
* @return the job name
*/
public String getName();
/**
* Get the job ID
* @return the job ID
*/
public JobID getJobID();
/**
* Get the user who ran the job.
* @return the user who ran the job
*/
public String getUser();
/**
* Get the job submission time.
* @return the job submission time
*/
public long getSubmissionTime();
/**
* Get the number of maps in the {@link JobStory}.
* @return the number of maps in the <code>Job</code>
*/
public int getNumberMaps();
/**
   * Get the number of reduces in the {@link JobStory}.
* @return the number of reduces in the <code>Job</code>
*/
public int getNumberReduces();
/**
* Get the input splits for the job.
* @return the input splits for the job
*/
public InputSplit[] getInputSplits();
/**
* Get {@link TaskInfo} for a given task.
* @param taskType {@link TaskType} of the task
* @param taskNumber Partition number of the task
* @return the <code>TaskInfo</code> for the given task
*/
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber);
/**
* Get {@link TaskAttemptInfo} for a given task-attempt, without regard to
* impact of locality (e.g. not needed to make scheduling decisions).
* @param taskType {@link TaskType} of the task-attempt
* @param taskNumber Partition number of the task-attempt
* @param taskAttemptNumber Attempt number of the task
* @return the <code>TaskAttemptInfo</code> for the given task-attempt
*/
public TaskAttemptInfo getTaskAttemptInfo(TaskType taskType,
int taskNumber,
int taskAttemptNumber);
/**
* Get {@link TaskAttemptInfo} for a given task-attempt, considering impact
* of locality.
* @param taskNumber Partition number of the task-attempt
* @param taskAttemptNumber Attempt number of the task
* @param locality Data locality of the task as scheduled in simulation
* @return the <code>TaskAttemptInfo</code> for the given task-attempt
*/
public TaskAttemptInfo
getMapTaskAttemptInfoAdjusted(int taskNumber,
int taskAttemptNumber,
int locality);
/**
* Get the outcome of the job execution.
* @return The outcome of the job execution.
*/
public Values getOutcome();
/**
* Get the queue where the job is submitted.
* @return the queue where the job is submitted.
*/
public String getQueueName();
}
| 3,958 | 30.672 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Rumen is a data extraction and analysis tool built for
* <a href="http://hadoop.apache.org/">Apache Hadoop</a>. Rumen mines job history
* logs to extract meaningful data and stores it into an easily-parsed format.
*
* The default output format of Rumen is <a href="http://www.json.org">JSON</a>.
* Rumen uses the <a href="http://jackson.codehaus.org/">Jackson</a> library to
* create JSON objects.
* <br><br>
*
* The following classes can be used to programmatically invoke Rumen:
* <ol>
* <li>
* {@link org.apache.hadoop.tools.rumen.JobConfigurationParser}<br>
* A parser to parse and filter out interesting properties from job
* configuration.
*
* <br><br>
* <i>Sample code</i>:
* <pre>
* <code>
* // An example to parse and filter out job name
*
* String conf_filename = .. // assume the job configuration filename here
*
* // construct a list of interesting properties
* List<String> interestedProperties = new ArrayList<String>();
* interestedProperties.add("mapreduce.job.name");
*
* JobConfigurationParser jcp =
* new JobConfigurationParser(interestedProperties);
*
* InputStream in = new FileInputStream(conf_filename);
* Properties parsedProperties = jcp.parse(in);
* </code>
* </pre>
* Some of the commonly used interesting properties are enumerated in
* {@link org.apache.hadoop.tools.rumen.JobConfPropertyNames}. <br><br>
*
* <b>Note:</b>
* A single instance of {@link org.apache.hadoop.tools.rumen.JobConfigurationParser}
* can be used to parse multiple job configuration files.
*
* </li>
* <li>
* {@link org.apache.hadoop.tools.rumen.JobHistoryParser} <br>
* A parser that parses job history files. It is an interface and actual
* implementations are defined as Enum in
* {@link org.apache.hadoop.tools.rumen.JobHistoryParserFactory}. Note that
* {@link org.apache.hadoop.tools.rumen.RewindableInputStream}<br>
* is a wrapper class around {@link java.io.InputStream} to make the input
* stream rewindable.
*
* <br>
* <i>Sample code</i>:
* <pre>
* <code>
* // An example to parse a current job history file i.e a job history
* // file for which the version is known
*
* String filename = .. // assume the job history filename here
*
* InputStream in = new FileInputStream(filename);
*
* HistoryEvent event = null;
*
* JobHistoryParser parser = new CurrentJHParser(in);
*
* event = parser.nextEvent();
* // process all the events
* while (event != null) {
* // ... process all event
* event = parser.nextEvent();
* }
*
* // close the parser and the underlying stream
* parser.close();
* </code>
* </pre>
*
* {@link org.apache.hadoop.tools.rumen.JobHistoryParserFactory} provides a
* {@link org.apache.hadoop.tools.rumen.JobHistoryParserFactory#getParser(org.apache.hadoop.tools.rumen.RewindableInputStream)}
* API to get a parser for parsing the job history file. Note that this
* API can be used if the job history version is unknown.<br><br>
* <i>Sample code</i>:
* <pre>
* <code>
* // An example to parse a job history for which the version is not
* // known i.e using JobHistoryParserFactory.getParser()
*
* String filename = .. // assume the job history filename here
*
* InputStream in = new FileInputStream(filename);
* RewindableInputStream ris = new RewindableInputStream(in);
*
* // JobHistoryParserFactory will check and return a parser that can
* // parse the file
* JobHistoryParser parser = JobHistoryParserFactory.getParser(ris);
*
* // now use the parser to parse the events
* HistoryEvent event = parser.nextEvent();
* while (event != null) {
* // ... process the event
* event = parser.nextEvent();
* }
*
* parser.close();
* </code>
* </pre>
* <b>Note:</b>
* Create one instance to parse a job history log and close it after use.
* </li>
* <li>
* {@link org.apache.hadoop.tools.rumen.TopologyBuilder}<br>
* Builds the cluster topology based on the job history events. Every
* job history file consists of events. Each event can be represented using
* {@link org.apache.hadoop.mapreduce.jobhistory.HistoryEvent}.
* These events can be passed to {@link org.apache.hadoop.tools.rumen.TopologyBuilder} using
* {@link org.apache.hadoop.tools.rumen.TopologyBuilder#process(org.apache.hadoop.mapreduce.jobhistory.HistoryEvent)}.
* A cluster topology can be represented using {@link org.apache.hadoop.tools.rumen.LoggedNetworkTopology}.
* Once all the job history events are processed, the cluster
* topology can be obtained using {@link org.apache.hadoop.tools.rumen.TopologyBuilder#build()}.
*
* <br><br>
* <i>Sample code</i>:
* <pre>
* <code>
* // Building topology for a job history file represented using
* // 'filename' and the corresponding configuration file represented
* // using 'conf_filename'
* String filename = .. // assume the job history filename here
* String conf_filename = .. // assume the job configuration filename here
*
 *   InputStream jobConfInputStream = new FileInputStream(conf_filename);
 *   InputStream jobHistoryInputStream = new FileInputStream(filename);
*
* TopologyBuilder tb = new TopologyBuilder();
*
* // construct a list of interesting properties
 *     List<String> interestingProperties = new ArrayList<String>();
* // add the interesting properties here
* interestingProperties.add("mapreduce.job.name");
*
* JobConfigurationParser jcp =
* new JobConfigurationParser(interestingProperties);
*
* // parse the configuration file
* tb.process(jcp.parse(jobConfInputStream));
*
* // read the job history file and pass it to the
* // TopologyBuilder.
* JobHistoryParser parser = new CurrentJHParser(jobHistoryInputStream);
* HistoryEvent e;
*
* // read and process all the job history events
* while ((e = parser.nextEvent()) != null) {
* tb.process(e);
* }
*
* LoggedNetworkTopology topology = tb.build();
* </code>
* </pre>
* </li>
* <li>
* {@link org.apache.hadoop.tools.rumen.JobBuilder}<br>
* Summarizes a job history file.
* {@link org.apache.hadoop.tools.rumen.JobHistoryUtils} provides
* {@link org.apache.hadoop.tools.rumen.JobHistoryUtils#extractJobID(String)}
* API for extracting job id from job history or job configuration files
* which can be used for instantiating {@link org.apache.hadoop.tools.rumen.JobBuilder}.
* {@link org.apache.hadoop.tools.rumen.JobBuilder} generates a
* {@link org.apache.hadoop.tools.rumen.LoggedJob} object via
* {@link org.apache.hadoop.tools.rumen.JobBuilder#build()}.
* See {@link org.apache.hadoop.tools.rumen.LoggedJob} for more details.
*
* <br><br>
* <i>Sample code</i>:
* <pre>
* <code>
* // An example to summarize a current job history file 'filename'
* // and the corresponding configuration file 'conf_filename'
*
* String filename = .. // assume the job history filename here
* String conf_filename = .. // assume the job configuration filename here
*
 *   InputStream jobConfInputStream = new FileInputStream(conf_filename);
 *   InputStream jobHistoryInputStream = new FileInputStream(filename);
 *
 *   String jobID = TraceBuilder.extractJobID(filename);
* JobBuilder jb = new JobBuilder(jobID);
*
* // construct a list of interesting properties
 *     List<String> interestingProperties = new ArrayList<String>();
* // add the interesting properties here
* interestingProperties.add("mapreduce.job.name");
*
* JobConfigurationParser jcp =
* new JobConfigurationParser(interestingProperties);
*
* // parse the configuration file
* jb.process(jcp.parse(jobConfInputStream));
*
* // parse the job history file
* JobHistoryParser parser = new CurrentJHParser(jobHistoryInputStream);
* try {
* HistoryEvent e;
* // read and process all the job history events
* while ((e = parser.nextEvent()) != null) {
 *       jb.process(e);
* }
* } finally {
* parser.close();
* }
*
* LoggedJob job = jb.build();
* </code>
* </pre>
* <b>Note:</b>
* The order of parsing the job configuration file or job history file is
* not important. Create one instance to parse the history file and job
* configuration.
* </li>
* <li>
* {@link org.apache.hadoop.tools.rumen.DefaultOutputter}<br>
* Implements {@link org.apache.hadoop.tools.rumen.Outputter} and writes
* JSON object in text format to the output file.
* {@link org.apache.hadoop.tools.rumen.DefaultOutputter} can be
* initialized with the output filename.
*
* <br><br>
* <i>Sample code</i>:
* <pre>
* <code>
* // An example to summarize a current job history file represented by
* // 'filename' and the configuration filename represented using
* // 'conf_filename'. Also output the job summary to 'out.json' along
* // with the cluster topology to 'topology.json'.
*
* String filename = .. // assume the job history filename here
* String conf_filename = .. // assume the job configuration filename here
*
* Configuration conf = new Configuration();
 *   DefaultOutputter outputter = new DefaultOutputter();
 *   outputter.init("out.json", conf);
*
 *   InputStream jobConfInputStream = new FileInputStream(conf_filename);
 *   InputStream jobHistoryInputStream = new FileInputStream(filename);
*
* // extract the job-id from the filename
* String jobID = TraceBuilder.extractJobID(filename);
* JobBuilder jb = new JobBuilder(jobID);
* TopologyBuilder tb = new TopologyBuilder();
*
* // construct a list of interesting properties
* List<String> interestingProperties = new ArrayList<String>();
* // add the interesting properties here
* interestingProperties.add("mapreduce.job.name");
*
* JobConfigurationParser jcp =
* new JobConfigurationParser(interestingProperties);
*
* // parse the configuration file
* tb.process(jcp.parse(jobConfInputStream));
*
* // read the job history file and pass it to the
* // TopologyBuilder.
* JobHistoryParser parser = new CurrentJHParser(jobHistoryInputStream);
* HistoryEvent e;
* while ((e = parser.nextEvent()) != null) {
* jb.process(e);
* tb.process(e);
* }
*
* LoggedJob j = jb.build();
*
* // serialize the job summary in json (text) format
* outputter.output(j);
*
* // close
* outputter.close();
*
* outputter.init(new Path("topology.json"), conf);
*
* // get the cluster topology using TopologyBuilder
* LoggedNetworkTopology topology = tb.build();
*
* // serialize the cluster topology in json (text) format
* outputter.output(topology);
*
* // close
* outputter.close();
* </code>
* </pre>
* </li>
* <li>
* {@link org.apache.hadoop.tools.rumen.JobTraceReader}<br>
* A reader for reading {@link org.apache.hadoop.tools.rumen.LoggedJob} serialized using
* {@link org.apache.hadoop.tools.rumen.DefaultOutputter}. {@link org.apache.hadoop.tools.rumen.LoggedJob}
* provides various APIs for extracting job details. Following are the most
* commonly used ones
* <ul>
* <li>{@link org.apache.hadoop.tools.rumen.LoggedJob#getMapTasks()} : Get the map tasks</li>
* <li>{@link org.apache.hadoop.tools.rumen.LoggedJob#getReduceTasks()} : Get the reduce tasks</li>
* <li>{@link org.apache.hadoop.tools.rumen.LoggedJob#getOtherTasks()} : Get the setup/cleanup tasks</li>
* <li>{@link org.apache.hadoop.tools.rumen.LoggedJob#getOutcome()} : Get the job's outcome</li>
* <li>{@link org.apache.hadoop.tools.rumen.LoggedJob#getSubmitTime()} : Get the job's submit time</li>
* <li>{@link org.apache.hadoop.tools.rumen.LoggedJob#getFinishTime()} : Get the job's finish time</li>
* </ul>
*
* <br><br>
* <i>Sample code</i>:
* <pre>
* <code>
* // An example to read job summary from a trace file 'out.json'.
* JobTraceReader reader =
*     new JobTraceReader(new Path("out.json"), new Configuration());
* LoggedJob job = reader.getNext();
* while (job != null) {
* // .... process job level information
* for (LoggedTask task : job.getMapTasks()) {
* // process all the map tasks in the job
* for (LoggedTaskAttempt attempt : task.getAttempts()) {
* // process all the map task attempts in the job
* }
* }
*
* // get the next job
* job = reader.getNext();
* }
* reader.close();
* </code>
* </pre>
* </li>
* <li>
* {@link org.apache.hadoop.tools.rumen.ClusterTopologyReader}<br>
* A reader to read {@link org.apache.hadoop.tools.rumen.LoggedNetworkTopology} serialized using
* {@link org.apache.hadoop.tools.rumen.DefaultOutputter}. {@link org.apache.hadoop.tools.rumen.ClusterTopologyReader} can be
* initialized with the path of the serialized topology file.
* {@link org.apache.hadoop.tools.rumen.ClusterTopologyReader#get()} can
* be used to get the
* {@link org.apache.hadoop.tools.rumen.LoggedNetworkTopology}.
*
* <br><br>
* <i>Sample code</i>:
* <pre>
* <code>
* // An example to read the cluster topology from a topology output file
* // 'topology.json'
* ClusterTopologyReader reader =
*     new ClusterTopologyReader(new Path("topology.json"), new Configuration());
* LoggedNetworkTopology topology = reader.get();
* for (LoggedNetworkTopology t : topology.getChildren()) {
*   // process the cluster topology
* }
* </code>
* </pre>
* </li>
* </ol>
*/
package org.apache.hadoop.tools.rumen;
| 16,037 | 41.541114 | 132 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ClusterStory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.Set;
import java.util.Random;
/**
* {@link ClusterStory} represents all configurations of a MapReduce cluster,
* including nodes, network topology, and slot configurations.
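* <p>
* A minimal usage sketch, assuming {@code cluster} is some {@link ClusterStory}
* implementation (for example a {@code ZombieCluster} built from a parsed
* topology), and that a host named {@code "node1"} and a rack named
* {@code "rack1"} exist in that topology:
* <pre>
* <code>
*   Set<MachineNode> machines = cluster.getMachines();
*   MachineNode node = cluster.getMachineByName("node1");
*   RackNode rack = cluster.getRackByName("rack1");
*   int d = cluster.distance(node, rack); // see the definition of distance below
*   MachineNode[] sample = cluster.getRandomMachines(10, new Random());
* </code>
* </pre>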
*/
public interface ClusterStory {
/**
* Get all machines of the cluster.
* @return A read-only set that contains all machines of the cluster.
*/
public Set<MachineNode> getMachines();
/**
* Get all racks of the cluster.
* @return A read-only set that contains all racks of the cluster.
*/
public Set<RackNode> getRacks();
/**
* Get the cluster topology tree.
* @return The root node of the cluster topology tree.
*/
public Node getClusterTopology();
/**
* Select a random set of machines.
* @param expected The expected sample size.
* @param random Random number generator to use.
* @return An array of up to expected number of {@link MachineNode}s.
*/
public MachineNode[] getRandomMachines(int expected, Random random);
/**
* Get {@link MachineNode} by its host name.
*
* @return The {@link MachineNode} with the same name. Or null if not found.
*/
public MachineNode getMachineByName(String name);
/**
* Get {@link RackNode} by its name.
* @return The {@link RackNode} with the same name. Or null if not found.
*/
public RackNode getRackByName(String name);
/**
* Determine the distance between two {@link Node}s. Currently, the distance
* is loosely defined as the length of the longer path for either a or b to
* reach their common ancestor.
*
* @param a
* @param b
* @return The distance between {@link Node} a and {@link Node} b.
*/
int distance(Node a, Node b);
/**
* Get the maximum distance possible between any two nodes.
* @return the maximum distance possible between any two nodes.
*/
int getMaximumDistance();
}
| 2,714 | 31.321429 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Anonymizer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.mapreduce.ID;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.tools.rumen.datatypes.*;
import org.apache.hadoop.tools.rumen.serializers.*;
import org.apache.hadoop.tools.rumen.state.*;
import org.codehaus.jackson.JsonEncoding;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.Version;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.module.SimpleModule;
public class Anonymizer extends Configured implements Tool {
private boolean anonymizeTrace = false;
private Path inputTracePath = null;
private Path outputTracePath = null;
private boolean anonymizeTopology = false;
private Path inputTopologyPath = null;
private Path outputTopologyPath = null;
//TODO Make this final if not using JSON
// private final StatePool statePool = new StatePool();
private StatePool statePool;
private ObjectMapper outMapper = null;
private JsonFactory outFactory = null;
private void initialize(String[] args) throws Exception {
try {
for (int i = 0; i < args.length; ++i) {
if ("-trace".equals(args[i])) {
anonymizeTrace = true;
inputTracePath = new Path(args[i+1]);
outputTracePath = new Path(args[i+2]);
i +=2;
}
if ("-topology".equals(args[i])) {
anonymizeTopology = true;
inputTopologyPath = new Path(args[i+1]);
outputTopologyPath = new Path(args[i+2]);
i +=2;
}
}
} catch (Exception e) {
throw new IllegalArgumentException("Illegal arguments list!", e);
}
if (!anonymizeTopology && !anonymizeTrace) {
throw new IllegalArgumentException("Invalid arguments list!");
}
statePool = new StatePool();
// initialize the state manager after the anonymizers are registered
statePool.initialize(getConf());
outMapper = new ObjectMapper();
// define a module
SimpleModule module = new SimpleModule("Anonymization Serializer",
new Version(0, 1, 1, "FINAL"));
// add various serializers to the module
// use the default (as-is) serializer for default data types
module.addSerializer(DataType.class, new DefaultRumenSerializer());
// use a blocking serializer for Strings as they can contain sensitive
// information
module.addSerializer(String.class, new BlockingSerializer());
// use object.toString() for object of type ID
module.addSerializer(ID.class, new ObjectStringSerializer<ID>());
// use getAnonymizedValue() for data types that have the anonymizing
// feature
module.addSerializer(AnonymizableDataType.class,
new DefaultAnonymizingRumenSerializer(statePool, getConf()));
// register the module with the object-mapper
outMapper.registerModule(module);
outFactory = outMapper.getJsonFactory();
}
// anonymize the job trace file
private void anonymizeTrace() throws Exception {
if (anonymizeTrace) {
System.out.println("Anonymizing trace file: " + inputTracePath);
JobTraceReader reader = null;
JsonGenerator outGen = null;
Configuration conf = getConf();
try {
// create a generator
outGen = createJsonGenerator(conf, outputTracePath);
// define the input trace reader
reader = new JobTraceReader(inputTracePath, conf);
// read the plain unanonymized logged job
LoggedJob job = reader.getNext();
while (job != null) {
// write it via an anonymizing channel
outGen.writeObject(job);
// read the next job
job = reader.getNext();
}
System.out.println("Anonymized trace file: " + outputTracePath);
} finally {
if (outGen != null) {
outGen.close();
}
if (reader != null) {
reader.close();
}
}
}
}
// anonymize the cluster topology file
private void anonymizeTopology() throws Exception {
if (anonymizeTopology) {
System.out.println("Anonymizing topology file: " + inputTopologyPath);
ClusterTopologyReader reader = null;
JsonGenerator outGen = null;
Configuration conf = getConf();
try {
// create a generator
outGen = createJsonGenerator(conf, outputTopologyPath);
// define the input cluster topology reader
reader = new ClusterTopologyReader(inputTopologyPath, conf);
// read the plain unanonymized logged job
LoggedNetworkTopology job = reader.get();
// write it via an anonymizing channel
outGen.writeObject(job);
System.out.println("Anonymized topology file: " + outputTopologyPath);
} finally {
if (outGen != null) {
outGen.close();
}
}
}
}
// Creates a JSON generator
private JsonGenerator createJsonGenerator(Configuration conf, Path path)
throws IOException {
FileSystem outFS = path.getFileSystem(conf);
CompressionCodec codec =
new CompressionCodecFactory(conf).getCodec(path);
OutputStream output;
Compressor compressor = null;
if (codec != null) {
compressor = CodecPool.getCompressor(codec);
output = codec.createOutputStream(outFS.create(path), compressor);
} else {
output = outFS.create(path);
}
JsonGenerator outGen = outFactory.createJsonGenerator(output,
JsonEncoding.UTF8);
outGen.useDefaultPrettyPrinter();
return outGen;
}
@Override
public int run(String[] args) throws Exception {
try {
initialize(args);
} catch (Exception e) {
e.printStackTrace();
printUsage();
return -1;
}
return run();
}
/**
* Runs the actual anonymization tool.
*/
public int run() throws Exception {
try {
anonymizeTrace();
} catch (IOException ioe) {
System.err.println("Error running the trace anonymizer!");
ioe.printStackTrace();
System.out.println("\n\nAnonymization unsuccessful!");
return -1;
}
try {
anonymizeTopology();
} catch (IOException ioe) {
System.err.println("Error running the cluster topology anonymizer!");
ioe.printStackTrace();
System.out.println("\n\nAnonymization unsuccessful!");
return -1;
}
statePool.persist();
System.out.println("Anonymization completed successfully!");
return 0;
}
private static void printUsage() {
System.out.println("\nUsage:-");
System.out.print(" Anonymizer");
System.out.print(" [-trace <input-trace-path> <output-trace-path>]");
System.out.println(" [-topology <input-topology-path> "
+ "<output-topology-path>] ");
System.out.print("\n");
}
/**
* The main driver program to use the anonymization utility.
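* <p>
* A sketch of a programmatic invocation; the file names are placeholders and
* {@code conf} is assumed to be an existing {@code Configuration}:
* <pre>
* <code>
*   String[] anonymizerArgs = {
*       "-trace", "input-trace.json", "anonymized-trace.json",
*       "-topology", "input-topology.json", "anonymized-topology.json" };
*   int exitCode = ToolRunner.run(conf, new Anonymizer(), anonymizerArgs);
* </code>
* </pre>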
* @param args
*/
public static void main(String[] args) {
Anonymizer instance = new Anonymizer();
int result = 0;
try {
result = ToolRunner.run(instance, args);
} catch (Exception e) {
e.printStackTrace(System.err);
System.exit(-1);
}
if (result != 0) {
System.exit(result);
}
return;
}
}
| 8,734 | 30.996337 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LogRecordType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.HashMap;
import java.util.Map;
import java.util.Iterator;
class LogRecordType {
static Map<String, LogRecordType> internees = new HashMap<String, LogRecordType>();
final String name;
final int index;
private LogRecordType(String name) {
super();
this.name = name;
index = internees.size();
}
static LogRecordType intern(String typeName) {
LogRecordType result = internees.get(typeName);
if (result == null) {
result = new LogRecordType(typeName);
internees.put(typeName, result);
}
return result;
}
static LogRecordType internSoft(String typeName) {
return internees.get(typeName);
}
@Override
public String toString() {
return name;
}
static String[] lineTypes() {
Iterator<Map.Entry<String, LogRecordType>> iter = internees.entrySet()
.iterator();
String[] result = new String[internees.size()];
for (int i = 0; i < internees.size(); ++i) {
result[i] = iter.next().getKey();
}
return result;
}
}
| 1,889 | 24.540541 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Outputter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
/**
* Interface to output a sequence of objects of type T.
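* <p>
* A sketch of the expected call sequence, where {@code conf} and {@code job}
* are assumed to be an existing {@code Configuration} and a {@code LoggedJob}
* obtained elsewhere, and {@code DefaultOutputter} is the JSON-backed
* implementation in this package:
* <pre>
* <code>
*   Outputter<LoggedJob> outputter = .. // e.g. a DefaultOutputter
*   outputter.init(new Path("jobs.json"), conf);
*   outputter.output(job);   // may be called any number of times
*   outputter.close();       // inherited from Closeable
* </code>
* </pre>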
*/
public interface Outputter<T> extends Closeable {
/**
* Initialize the {@link Outputter} to a specific path.
* @param path The {@link Path} to the output file.
* @param conf Configuration
* @throws IOException
*/
public void init(Path path, Configuration conf) throws IOException;
/**
* Output an object.
* @param object The object to be output.
* @throws IOException
*/
public void output(T object) throws IOException;
}
| 1,494 | 32.222222 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedLocation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import org.apache.hadoop.tools.rumen.datatypes.NodeName;
import org.codehaus.jackson.annotate.JsonAnySetter;
/**
* A {@link LoggedLocation} is a representation of a point in a hierarchical
* network, represented as a series of membership names, broadest first.
*
* For example, if your network has <i>hosts</i> grouped into <i>racks</i>, then
* in one cluster you might have a node {@code node1} on rack {@code rack1}. This
* would be represented with an ArrayList of two layers, with the two
* {@link String}s being {@code "rack1"} and {@code "node1"}.
*
* The details of this class are set up to meet the requirements of the Jackson
* JSON parser/generator.
*
* All of the public methods are simply accessors for the instance variables we
* want to write out in the JSON files.
*
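* A small sketch of reading a location, assuming {@code location} is a
* {@link LoggedLocation} obtained from a parsed trace:
* <pre>
* <code>
*   for (NodeName layer : location.getLayers()) {
*     // broadest first: e.g. the rack name, then the host name
*   }
* </code>
* </pre>
*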
*/
public class LoggedLocation implements DeepCompare {
static final Map<List<String>, List<NodeName>> layersCache =
new HashMap<List<String>, List<NodeName>>();
/**
* The full path from the root of the network to the host.
*
* NOTE that this assumes that the network topology is a tree.
*/
List<NodeName> layers = Collections.emptyList();
static private Set<String> alreadySeenAnySetterAttributes =
new TreeSet<String>();
public List<NodeName> getLayers() {
return layers;
}
void setLayers(List<String> layers) {
if (layers == null || layers.isEmpty()) {
this.layers = Collections.emptyList();
} else {
synchronized (layersCache) {
List<NodeName> found = layersCache.get(layers);
if (found == null) {
// make a copy with interned string.
List<NodeName> clone = new ArrayList<NodeName>(layers.size());
clone.add(new NodeName(layers.get(0).intern(), null));
clone.add(new NodeName(null, layers.get(1).intern()));
// making it read-only as we are sharing them.
List<NodeName> readonlyLayers = Collections.unmodifiableList(clone);
List<String> readonlyLayersKey = Collections.unmodifiableList(layers);
layersCache.put(readonlyLayersKey, readonlyLayers);
this.layers = readonlyLayers;
} else {
this.layers = found;
}
}
}
}
// the second input parameter is ignored.
@JsonAnySetter
public void setUnknownAttribute(String attributeName, Object ignored) {
if (!alreadySeenAnySetterAttributes.contains(attributeName)) {
alreadySeenAnySetterAttributes.add(attributeName);
System.err.println("In LoggedJob, we saw the unknown attribute "
+ attributeName + ".");
}
}
// I'll treat this as an atomic object type
private void compareStrings(List<NodeName> c1, List<NodeName> c2,
TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
TreePath recursePath = new TreePath(loc, eltname);
if (c1 == null || c2 == null || (c1.size() != c2.size())) {
throw new DeepInequalityException(eltname + " miscompared", recursePath);
}
for (NodeName n1 : c1) {
boolean found = false;
for (NodeName n2 : c2) {
if (n1.getValue().equals(n2.getValue())) {
found = true;
break;
}
}
if (!found) {
throw new DeepInequalityException(eltname
+ " miscompared [" + n1.getValue() +"]", recursePath);
}
}
}
public void deepCompare(DeepCompare comparand, TreePath loc)
throws DeepInequalityException {
if (!(comparand instanceof LoggedLocation)) {
throw new DeepInequalityException("comparand has wrong type", loc);
}
LoggedLocation other = (LoggedLocation) comparand;
compareStrings(layers, other.layers, loc, "layers");
}
}
| 4,832 | 33.276596 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/HistoryEventEmitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.text.ParseException;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
abstract class HistoryEventEmitter {
static final private Log LOG = LogFactory.getLog(HistoryEventEmitter.class);
abstract List<SingleEventEmitter> nonFinalSEEs();
abstract List<SingleEventEmitter> finalSEEs();
protected HistoryEventEmitter() {
// no code
}
enum PostEmitAction {
NONE, REMOVE_HEE
};
final Pair<Queue<HistoryEvent>, PostEmitAction> emitterCore(ParsedLine line,
String name) {
Queue<HistoryEvent> results = new LinkedList<HistoryEvent>();
PostEmitAction removeEmitter = PostEmitAction.NONE;
for (SingleEventEmitter see : nonFinalSEEs()) {
HistoryEvent event = see.maybeEmitEvent(line, name, this);
if (event != null) {
results.add(event);
}
}
for (SingleEventEmitter see : finalSEEs()) {
HistoryEvent event = see.maybeEmitEvent(line, name, this);
if (event != null) {
results.add(event);
removeEmitter = PostEmitAction.REMOVE_HEE;
break;
}
}
return new Pair<Queue<HistoryEvent>, PostEmitAction>(results, removeEmitter);
}
protected static Counters maybeParseCounters(String counters) {
try {
return parseCounters(counters);
} catch (ParseException e) {
LOG.warn("The counter string, \"" + counters + "\" is badly formatted.");
return null;
}
}
protected static Counters parseCounters(String counters)
throws ParseException {
if (counters == null) {
LOG.warn("HistoryEventEmitters: null counter detected:");
return null;
}
counters = counters.replace("\\.", "\\\\.");
counters = counters.replace("\\\\{", "\\{");
counters = counters.replace("\\\\}", "\\}");
counters = counters.replace("\\\\(", "\\(");
counters = counters.replace("\\\\)", "\\)");
counters = counters.replace("\\\\[", "\\[");
counters = counters.replace("\\\\]", "\\]");
org.apache.hadoop.mapred.Counters depForm =
org.apache.hadoop.mapred.Counters.fromEscapedCompactString(counters);
return new Counters(depForm);
}
}
| 3,194 | 32.28125 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Version20LogInterfaceUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import org.apache.hadoop.mapreduce.TaskType;
// This class exists to hold a bunch of static utils. It's never instantiated.
abstract class Version20LogInterfaceUtils {
static TaskType get20TaskType(String taskType) {
try {
return TaskType.valueOf(taskType);
} catch (IllegalArgumentException e) {
if ("CLEANUP".equals(taskType)) {
return TaskType.JOB_CLEANUP;
}
if ("SETUP".equals(taskType)) {
return TaskType.JOB_SETUP;
}
return null;
}
}
}
| 1,363 | 31.47619 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/CDFRandomGenerator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.hadoop.tools.rumen;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
/**
* An instance of this class generates random values that conform to the
* embedded {@link LoggedDiscreteCDF}. The discrete CDF is a pointwise
* approximation of the "real" CDF. We therefore have a choice of interpolation
* rules.
*
* A concrete subclass of this abstract class will implement valueAt(double)
* using a class-dependent interpolation rule.
*
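* A sketch of such a subclass (an illustration only, not the package's own
* concrete implementation, and assumed to live in this package so that it can
* reach the protected helpers) doing piecewise linear interpolation between
* the two CDF points that bracket the requested probability:
* <pre>
* <code>
*   class LinearInterpolationCDFRandomGenerator extends CDFRandomGenerator {
*     LinearInterpolationCDFRandomGenerator(LoggedDiscreteCDF cdf, long seed) {
*       super(cdf, seed);
*     }
*
*     public long valueAt(double probability) {
*       // the bracketing points; their rankings are assumed to be distinct
*       int lower = floorIndex(probability);
*       double loRank = getRankingAt(lower);
*       double hiRank = getRankingAt(lower + 1);
*       long loDatum = getDatumAt(lower);
*       long hiDatum = getDatumAt(lower + 1);
*       double fraction = (probability - loRank) / (hiRank - loRank);
*       return loDatum + (long) (fraction * (hiDatum - loDatum));
*     }
*   }
* </code>
* </pre>
*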
*/
public abstract class CDFRandomGenerator {
final double[] rankings;
final long[] values;
final Random random;
CDFRandomGenerator(LoggedDiscreteCDF cdf) {
this(cdf, new Random());
}
CDFRandomGenerator(LoggedDiscreteCDF cdf, long seed) {
this(cdf, new Random(seed));
}
private CDFRandomGenerator(LoggedDiscreteCDF cdf, Random random) {
this.random = random;
rankings = new double[cdf.getRankings().size() + 2];
values = new long[cdf.getRankings().size() + 2];
initializeTables(cdf);
}
protected final void initializeTables(LoggedDiscreteCDF cdf) {
rankings[0] = 0.0;
values[0] = cdf.getMinimum();
rankings[rankings.length - 1] = 1.0;
values[rankings.length - 1] = cdf.getMaximum();
List<LoggedSingleRelativeRanking> subjects = cdf.getRankings();
for (int i = 0; i < subjects.size(); ++i) {
rankings[i + 1] = subjects.get(i).getRelativeRanking();
values[i + 1] = subjects.get(i).getDatum();
}
}
protected int floorIndex(double probe) {
int result = Arrays.binarySearch(rankings, probe);
return Math.abs(result + 1) - 1;
}
protected double getRankingAt(int index) {
return rankings[index];
}
protected long getDatumAt(int index) {
return values[index];
}
public long randomValue() {
return valueAt(random.nextDouble());
}
public abstract long valueAt(double probability);
}
| 2,707 | 29.088889 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/DeepInequalityException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
/**
* We use this exception class in the unit tests, where we do a deep comparison
* of logged objects; it carries the {@link TreePath} that leads from the root
* to the point of inequality.
*
*/
public class DeepInequalityException extends Exception {
static final long serialVersionUID = 1352469876;
final TreePath path;
/**
* @param message
* an exception message
* @param path
* the path that gets from the root to the inequality
*
* This is the constructor that I intend to have used for this
* exception.
*/
public DeepInequalityException(String message, TreePath path,
Throwable chainee) {
super(message, chainee);
this.path = path;
}
/**
* @param message
* an exception message
* @param path
* the path that gets from the root to the inequality
*
* This is the constructor that I intend to have used for this
* exception.
*/
public DeepInequalityException(String message, TreePath path) {
super(message);
this.path = path;
}
}
| 1,860 | 29.016129 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Pair.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
class Pair<CarType, CdrType> {
private final CarType car;
private final CdrType cdr;
Pair(CarType car, CdrType cdr) {
super();
this.car = car;
this.cdr = cdr;
}
CarType first() {
return car;
}
CdrType second() {
return cdr;
}
}
| 1,115 | 27.615385 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/DefaultInputDemuxer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
/**
* {@link DefaultInputDemuxer} acts as a pass-through demuxer. It just opens
* each file and returns the input stream. If the input is compressed, it
* returns a decompressing stream.
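* <p>
* A sketch of the expected call pattern, where {@code historyPath} and
* {@code conf} are assumed to be an existing {@code Path} and
* {@code Configuration}:
* <pre>
* <code>
*   InputDemuxer demuxer = new DefaultInputDemuxer();
*   demuxer.bindTo(historyPath, conf);
*   Pair<String, InputStream> item;
*   while ((item = demuxer.getNext()) != null) {
*     // item.first() is the file name,
*     // item.second() is the (possibly decompressed) input stream
*   }
*   demuxer.close();
* </code>
* </pre>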
*/
public class DefaultInputDemuxer implements InputDemuxer {
String name;
InputStream input;
@Override
public void bindTo(Path path, Configuration conf) throws IOException {
if (name != null) { // re-binding before the previous one was consumed.
close();
}
name = path.getName();
input = new PossiblyDecompressedInputStream(path, conf);
return;
}
@Override
public Pair<String, InputStream> getNext() throws IOException {
if (name != null) {
Pair<String, InputStream> ret =
new Pair<String, InputStream>(name, input);
name = null;
input = null;
return ret;
}
return null;
}
@Override
public void close() throws IOException {
try {
if (input != null) {
input.close();
}
} finally {
name = null;
input = null;
}
}
}
| 2,042 | 27.774648 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ClusterTopologyReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
/**
* Reads the JSON-encoded cluster topology and produces the parsed
* {@link LoggedNetworkTopology} object.
*/
public class ClusterTopologyReader {
private LoggedNetworkTopology topology;
private void readTopology(JsonObjectMapperParser<LoggedNetworkTopology> parser)
throws IOException {
try {
topology = parser.getNext();
if (topology == null) {
throw new IOException(
"Input file does not contain valid topology data.");
}
} finally {
parser.close();
}
}
/**
* Constructor.
*
* @param path
* Path to the JSON-encoded topology file, possibly compressed.
* @param conf
* @throws IOException
*/
public ClusterTopologyReader(Path path, Configuration conf)
throws IOException {
JsonObjectMapperParser<LoggedNetworkTopology> parser = new JsonObjectMapperParser<LoggedNetworkTopology>(
path, LoggedNetworkTopology.class, conf);
readTopology(parser);
}
/**
* Constructor.
*
* @param input
* The input stream for the JSON-encoded topology data.
*/
public ClusterTopologyReader(InputStream input) throws IOException {
JsonObjectMapperParser<LoggedNetworkTopology> parser = new JsonObjectMapperParser<LoggedNetworkTopology>(
input, LoggedNetworkTopology.class);
readTopology(parser);
}
/**
* Get the {@link LoggedNetworkTopology} object.
*
* @return The {@link LoggedNetworkTopology} object parsed from the input.
*/
public LoggedNetworkTopology get() {
return topology;
}
}
| 2,554 | 30.158537 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/DeepCompare.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
/**
* Classes that implement this interface can deep-compare [for equality only,
* not order] with another instance. If there is any semantically significant
* difference, the implementer throws a {@link DeepInequalityException} with a
* chain of causes describing the chain of field references and indices that
* lead to the miscompared point.
*
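* A sketch of a typical implementation, where {@code MyLoggedType} and its
* {@code someScalar} field are hypothetical:
* <pre>
* <code>
*   public void deepCompare(DeepCompare comparand, TreePath loc)
*       throws DeepInequalityException {
*     if (!(comparand instanceof MyLoggedType)) {
*       throw new DeepInequalityException("comparand has wrong type", loc);
*     }
*     MyLoggedType other = (MyLoggedType) comparand;
*     if (someScalar != other.someScalar) {
*       throw new DeepInequalityException("someScalar miscompared",
*           new TreePath(loc, "someScalar"));
*     }
*   }
* </code>
* </pre>
*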
*/
public interface DeepCompare {
/**
* @param other
* the other comparand that's being compared to me
* @param myLocation
* the path that got to me. In the root, myLocation is null. To
*          process the scalar {@code foo} field of the root we will make a
*          recursive call with a {@link TreePath} whose {@code fieldName} is
*          {@code "foo"} and whose {@code index} is -1 and whose {@code
*          parent} is {@code null}. To process element {@code i} of the plural
*          {@code bar} field of the root we will make a recursive call with a
*          {@link TreePath} whose {@code fieldName} is {@code "bar"}, whose
*          {@code index} is {@code i} and whose {@code parent} is also
*          {@code null}.
* @throws DeepInequalityException
*/
public void deepCompare(DeepCompare other, TreePath myLocation)
throws DeepInequalityException;
}
| 2,117 | 45.043478 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ZombieJobProducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
/**
* Produces {@link JobStory}s from a job trace.
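* <p>
* A minimal usage sketch; the trace file name is a placeholder and the
* {@code null} cluster means no topology knowledge is available:
* <pre>
* <code>
*   ZombieJobProducer producer =
*       new ZombieJobProducer(new Path("trace.json"), null, new Configuration());
*   try {
*     ZombieJob job;
*     while ((job = producer.getNextJob()) != null) {
*       // hand the synthetic job to a simulator or load generator here
*     }
*   } finally {
*     producer.close();
*   }
* </code>
* </pre>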
*/
public class ZombieJobProducer implements JobStoryProducer {
private final JobTraceReader reader;
private final ZombieCluster cluster;
private boolean hasRandomSeed = false;
private long randomSeed = 0;
private ZombieJobProducer(JobTraceReader reader, ZombieCluster cluster,
boolean hasRandomSeed, long randomSeed) {
this.reader = reader;
this.cluster = cluster;
this.hasRandomSeed = hasRandomSeed;
this.randomSeed = (hasRandomSeed) ? randomSeed : System.nanoTime();
}
/**
* Constructor
*
* @param path
* Path to the JSON trace file, possibly compressed.
* @param cluster
* The topology of the cluster that corresponds to the jobs in the
* trace. The argument can be null if we do not have knowledge of the
* cluster topology.
* @param conf
* @throws IOException
*/
public ZombieJobProducer(Path path, ZombieCluster cluster, Configuration conf)
throws IOException {
this(new JobTraceReader(path, conf), cluster, false, -1);
}
/**
* Constructor
*
* @param path
* Path to the JSON trace file, possibly compressed.
* @param cluster
* The topology of the cluster that corresponds to the jobs in the
* trace. The argument can be null if we do not have knowledge of the
* cluster topology.
* @param conf
* @param randomSeed
* use a deterministic seed.
* @throws IOException
*/
public ZombieJobProducer(Path path, ZombieCluster cluster,
Configuration conf, long randomSeed) throws IOException {
this(new JobTraceReader(path, conf), cluster, true, randomSeed);
}
/**
* Constructor
*
* @param input
* The input stream for the JSON trace.
* @param cluster
* The topology of the cluster that corresponds to the jobs in the
* trace. The argument can be null if we do not have knowledge of the
* cluster topology.
* @throws IOException
*/
public ZombieJobProducer(InputStream input, ZombieCluster cluster)
throws IOException {
this(new JobTraceReader(input), cluster, false, -1);
}
/**
* Constructor
*
* @param input
* The input stream for the JSON trace.
* @param cluster
* The topology of the cluster that corresponds to the jobs in the
* trace. The argument can be null if we do not have knowledge of the
* cluster topology.
* @param randomSeed
* use a deterministic seed.
* @throws IOException
*/
public ZombieJobProducer(InputStream input, ZombieCluster cluster,
long randomSeed) throws IOException {
this(new JobTraceReader(input), cluster, true, randomSeed);
}
@Override
public ZombieJob getNextJob() throws IOException {
LoggedJob job = reader.getNext();
if (job == null) {
return null;
} else if (hasRandomSeed) {
long subRandomSeed = RandomSeedGenerator.getSeed(
"forZombieJob" + job.getJobID(), randomSeed);
return new ZombieJob(job, cluster, subRandomSeed);
} else {
return new ZombieJob(job, cluster);
}
}
@Override
public void close() throws IOException {
reader.close();
}
}
| 4,309 | 31.164179 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobTraceReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
/**
* Reads JSON-encoded job traces and produces {@link LoggedJob} instances.
*/
public class JobTraceReader extends JsonObjectMapperParser<LoggedJob> {
/**
* Constructor.
*
* @param path
* Path to the JSON trace file, possibly compressed.
* @param conf
* @throws IOException
*/
public JobTraceReader(Path path, Configuration conf) throws IOException {
super(path, LoggedJob.class, conf);
}
/**
* Constructor.
*
* @param input
* The input stream for the JSON trace.
*/
public JobTraceReader(InputStream input) throws IOException {
super(input, LoggedJob.class);
}
}
| 1,630 | 30.365385 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobHistoryUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.jobhistory.JhCounter;
import org.apache.hadoop.mapreduce.jobhistory.JhCounterGroup;
import org.apache.hadoop.mapreduce.jobhistory.JhCounters;
import org.apache.hadoop.mapreduce.v2.hs.JobHistory;
/**
* Job History related utils for handling multiple formats of history logs of
* different hadoop versions like Pre21 history logs, current history logs.
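* <p>
* A small sketch, assuming {@code fileName} holds the name of a job history
* file or a job configuration file in any of the supported formats:
* <pre>
* <code>
*   String jobId = JobHistoryUtils.extractJobID(fileName);
*   if (jobId == null) {
*     // not a recognizable history or configuration file (e.g. a .crc file)
*   } else if (JobHistoryUtils.isJobConfXml(fileName)) {
*     // a job configuration file of job 'jobId'
*   } else {
*     // a job history file of job 'jobId'
*   }
* </code>
* </pre>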
*/
public class JobHistoryUtils {
private static String applyParser(String fileName, Pattern pattern) {
Matcher matcher = pattern.matcher(fileName);
if (!matcher.matches()) {
return null;
}
return matcher.group(1);
}
/**
* Extracts jobID string from the given job history log file name or
* job history configuration file name.
* @param fileName name of job history file or job history configuration file
* @return a valid jobID String, parsed out of the file name. Otherwise,
* [especially for .crc files] returns null.
*/
static String extractJobID(String fileName) {
// Get jobID if fileName is a config file name.
String jobId = extractJobIDFromConfFileName(fileName);
if (jobId == null) {
// Get JobID if fileName is a job history file name
jobId = extractJobIDFromHistoryFileName(fileName);
}
return jobId;
}
/**
* Extracts job id from the current hadoop version's job history file name.
* @param fileName job history file name from which job id is to be extracted
* @return job id if the history file name format is same as that of the
* current hadoop version. Returns null otherwise.
*/
private static String extractJobIDFromCurrentHistoryFile(String fileName) {
JobID id = null;
if (org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils
.isValidJobHistoryFileName(fileName)) {
try {
id = org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils
.getJobIDFromHistoryFilePath(fileName);
} catch (IOException e) {
// Ignore this exception and try to get the jobID assuming the
// history file is from an older hadoop version
}
}
if (id != null) {
return id.toString();
}
return null;
}
/**
* Extracts jobID string from the given job history file name.
* @param fileName name of the job history file
* @return JobID if the given <code>fileName</code> is a valid job history
* file name, <code>null</code> otherwise.
*/
private static String extractJobIDFromHistoryFileName(String fileName) {
// History file name could be in one of the following formats
// (1) old pre21 job history file name format
// (2) new pre21 job history file name format
// (3) current job history file name format i.e. 0.22
// Try to get the jobID assuming that the history file is from the current
// hadoop version
String jobID = extractJobIDFromCurrentHistoryFile(fileName);
if (jobID != null) {
return jobID;//history file is of current hadoop version
}
// History file could be of older hadoop versions
String pre21JobID = applyParser(fileName,
Pre21JobHistoryConstants.JOBHISTORY_FILENAME_REGEX_V1);
if (pre21JobID == null) {
pre21JobID = applyParser(fileName,
Pre21JobHistoryConstants.JOBHISTORY_FILENAME_REGEX_V2);
}
return pre21JobID;
}
/**
* Extracts jobID string from the given job conf xml file name.
* @param fileName name of the job conf xml file
* @return job id if the given <code>fileName</code> is a valid job conf xml
* file name, <code>null</code> otherwise.
*/
private static String extractJobIDFromConfFileName(String fileName) {
// History conf file name could be in one of the following formats
// (1) old pre21 job history file name format
// (2) new pre21 job history file name format
// (3) current job history file name format i.e. 0.22
String pre21JobID = applyParser(fileName,
Pre21JobHistoryConstants.CONF_FILENAME_REGEX_V1);
if (pre21JobID == null) {
pre21JobID = applyParser(fileName,
Pre21JobHistoryConstants.CONF_FILENAME_REGEX_V2);
}
if (pre21JobID != null) {
return pre21JobID;
}
return applyParser(fileName, JobHistory.CONF_FILENAME_REGEX);
}
/**
* Checks if the given <code>fileName</code> is a valid job conf xml file name
* @param fileName name of the file to be validated
* @return <code>true</code> if the given <code>fileName</code> is a valid
* job conf xml file name.
*/
static boolean isJobConfXml(String fileName) {
String jobId = extractJobIDFromConfFileName(fileName);
return jobId != null;
}
/**
* Extract counters from the given JhCounters object into a Map keyed by counter name.
* @param counters the counters to be extracted from
* @return the map of counters
*/
static Map<String, Long> extractCounters(JhCounters counters) {
Map<String, Long> countersMap = new HashMap<String, Long>();
if (counters != null) {
for (JhCounterGroup group : counters.groups) {
for (JhCounter counter : group.counts) {
countersMap.put(counter.name.toString(), counter.value);
}
}
}
return countersMap;
}
}
| 6,305 | 36.313609 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ParsedTask.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapreduce.jobhistory.JhCounters;
/**
* This is a wrapper class around {@link LoggedTask}. It also provides the
* extra information about the task, obtained from the job history, that is
* not written to the JSON trace file.
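* <p>
* A small sketch of reading that extra information, assuming {@code task} is a
* {@link ParsedTask} produced while parsing a job history file:
* <pre>
* <code>
*   Map<String, Long> counters = task.obtainCounters();
*   String failedAttempt = task.obtainFailedDueToAttemptId(); // null if successful
*   for (ParsedTaskAttempt attempt : task.obtainTaskAttempts()) {
*     // inspect per-attempt details here
*   }
* </code>
* </pre>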
*/
public class ParsedTask extends LoggedTask {
private static final Log LOG = LogFactory.getLog(ParsedTask.class);
private String diagnosticInfo;
private String failedDueToAttempt;
private Map<String, Long> countersMap = new HashMap<String, Long>();
ParsedTask() {
super();
}
public void incorporateCounters(JhCounters counters) {
Map<String, Long> countersMap =
JobHistoryUtils.extractCounters(counters);
putCounters(countersMap);
super.incorporateCounters(counters);
}
/** Set the task counters */
public void putCounters(Map<String, Long> counters) {
this.countersMap = counters;
}
/**
* @return the task counters
*/
public Map<String, Long> obtainCounters() {
return countersMap;
}
/** Set the task diagnostic-info */
public void putDiagnosticInfo(String msg) {
diagnosticInfo = msg;
}
/**
* @return the diagnostic-info of this task.
* If the task is successful, returns null.
*/
public String obtainDiagnosticInfo() {
return diagnosticInfo;
}
/**
* Set the failed-due-to-attemptId info of this task.
*/
public void putFailedDueToAttemptId(String attempt) {
failedDueToAttempt = attempt;
}
/**
* @return the failed-due-to-attemptId info of this task.
* If the task is successful, returns null.
*/
public String obtainFailedDueToAttemptId() {
return failedDueToAttempt;
}
/**
* @return the list of attempts of this task.
*/
public List<ParsedTaskAttempt> obtainTaskAttempts() {
List<LoggedTaskAttempt> attempts = getAttempts();
return convertTaskAttempts(attempts);
}
List<ParsedTaskAttempt> convertTaskAttempts(
List<LoggedTaskAttempt> attempts) {
List<ParsedTaskAttempt> result = new ArrayList<ParsedTaskAttempt>();
for (LoggedTaskAttempt t : attempts) {
if (t instanceof ParsedTaskAttempt) {
result.add((ParsedTaskAttempt)t);
} else {
throw new RuntimeException(
"Unexpected type of taskAttempts in the list...");
}
}
return result;
}
/** Dump the extra info of ParsedTask */
void dumpParsedTask() {
LOG.info("ParsedTask details:" + obtainCounters()
+ "\n" + obtainFailedDueToAttemptId()
+ "\nPreferred Locations are:");
List<LoggedLocation> loc = getPreferredLocations();
for (LoggedLocation l : loc) {
LOG.info(l.getLayers() + ";" + l.toString());
}
List<ParsedTaskAttempt> attempts = obtainTaskAttempts();
for (ParsedTaskAttempt attempt : attempts) {
attempt.dumpParsedTaskAttempt();
}
}
}
| 3,898 | 28.537879 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobConfPropertyNames.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.Arrays;
import org.apache.hadoop.mapreduce.MRJobConfig;
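/**
* Groups of candidate job configuration property names that refer to the same
* logical setting, typically an older deprecated name alongside the current
* one.
* <p>
* A small sketch of resolving a value, assuming {@code conf} is an existing
* {@code Configuration}:
* <pre>
* <code>
*   String queue = null;
*   for (String key : JobConfPropertyNames.QUEUE_NAMES.getCandidates()) {
*     String value = conf.get(key);
*     if (value != null) {
*       queue = value;
*       break;
*     }
*   }
* </code>
* </pre>
*/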
public enum JobConfPropertyNames {
QUEUE_NAMES("mapred.job.queue.name", MRJobConfig.QUEUE_NAME),
JOB_NAMES("mapred.job.name", MRJobConfig.JOB_NAME),
TASK_JAVA_OPTS_S("mapred.child.java.opts"),
MAP_JAVA_OPTS_S("mapred.child.java.opts", MRJobConfig.MAP_JAVA_OPTS),
REDUCE_JAVA_OPTS_S("mapred.child.java.opts", MRJobConfig.REDUCE_JAVA_OPTS);
private String[] candidates;
JobConfPropertyNames(String... candidates) {
this.candidates = candidates;
}
public String[] getCandidates() {
return Arrays.copyOf(candidates, candidates.length);
}
}
| 1,489 | 35.341463 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedNetworkTopology.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.Serializable;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.ArrayList;
import java.util.Comparator;
import org.apache.hadoop.tools.rumen.datatypes.NodeName;
import org.codehaus.jackson.annotate.JsonAnySetter;
/**
* A {@link LoggedNetworkTopology} represents a tree that in turn represents a
* hierarchy of hosts. The current version requires the tree to have all leaves
* at the same level.
*
* All of the public methods are simply accessors for the instance variables we
* want to write out in the JSON files.
*
*/
public class LoggedNetworkTopology implements DeepCompare {
NodeName name;
List<LoggedNetworkTopology> children = new ArrayList<LoggedNetworkTopology>();
static private Set<String> alreadySeenAnySetterAttributes =
new TreeSet<String>();
public LoggedNetworkTopology() {
super();
}
  @SuppressWarnings("unused")
  // for input parameter ignored.
@JsonAnySetter
public void setUnknownAttribute(String attributeName, Object ignored) {
if (!alreadySeenAnySetterAttributes.contains(attributeName)) {
alreadySeenAnySetterAttributes.add(attributeName);
System.err.println("In LoggedJob, we saw the unknown attribute "
+ attributeName + ".");
}
}
/**
* We need this because we have to sort the {@code children} field. That field
* is set-valued, but if we sort these fields we ensure that comparisons won't
* bogusly fail because the hash table happened to enumerate in a different
* order.
*
*/
static class TopoSort implements Comparator<LoggedNetworkTopology>,
Serializable {
public int compare(LoggedNetworkTopology t1, LoggedNetworkTopology t2) {
return t1.name.getValue().compareTo(t2.name.getValue());
}
}
/**
* @param hosts
* a HashSet of the {@link ParsedHost}
* @param name
* the name of this level's host [for recursive descent]
* @param level
* the level number
*/
LoggedNetworkTopology(Set<ParsedHost> hosts, String name, int level) {
if (name == null) {
this.name = NodeName.ROOT;
} else {
this.name = new NodeName(name);
}
this.children = null;
if (level < ParsedHost.numberOfDistances() - 1) {
HashMap<String, HashSet<ParsedHost>> topologies =
new HashMap<String, HashSet<ParsedHost>>();
Iterator<ParsedHost> iter = hosts.iterator();
while (iter.hasNext()) {
ParsedHost host = iter.next();
String thisComponent = host.nameComponent(level);
HashSet<ParsedHost> thisSet = topologies.get(thisComponent);
if (thisSet == null) {
thisSet = new HashSet<ParsedHost>();
topologies.put(thisComponent, thisSet);
}
thisSet.add(host);
}
children = new ArrayList<LoggedNetworkTopology>();
for (Map.Entry<String, HashSet<ParsedHost>> ent : topologies.entrySet()) {
children.add(new LoggedNetworkTopology(ent.getValue(), ent.getKey(),
level + 1));
}
} else {
// nothing to do here
}
}
LoggedNetworkTopology(Set<ParsedHost> hosts) {
this(hosts, null, 0);
}
public NodeName getName() {
return name;
}
void setName(String name) {
this.name = new NodeName(name);
}
public List<LoggedNetworkTopology> getChildren() {
return children;
}
void setChildren(List<LoggedNetworkTopology> children) {
this.children = children;
}
private void compare1(List<LoggedNetworkTopology> c1,
List<LoggedNetworkTopology> c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
if (c1 == null || c2 == null || c1.size() != c2.size()) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
Collections.sort(c1, new TopoSort());
Collections.sort(c2, new TopoSort());
for (int i = 0; i < c1.size(); ++i) {
c1.get(i).deepCompare(c2.get(i), new TreePath(loc, eltname, i));
}
}
public void deepCompare(DeepCompare comparand, TreePath loc)
throws DeepInequalityException {
if (!(comparand instanceof LoggedNetworkTopology)) {
throw new DeepInequalityException("comparand has wrong type", loc);
}
LoggedNetworkTopology other = (LoggedNetworkTopology) comparand;
compare1(children, other.children, loc, "children");
}
}
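// A minimal traversal sketch using only the accessors above; the method name
// "walk" is illustrative. It prints each node's name at its depth in the tree.
//
//   static void walk(LoggedNetworkTopology node, int depth) {
//     System.out.println(depth + " " + node.getName().getValue());
//     if (node.getChildren() != null) {
//       for (LoggedNetworkTopology child : node.getChildren()) {
//         walk(child, depth + 1);
//       }
//     }
//   }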
| 5,416 | 29.432584 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/TaskAttemptInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.List;
import org.apache.hadoop.mapred.TaskStatus.State;
/**
* {@link TaskAttemptInfo} is a collection of statistics about a particular
* task-attempt gleaned from job-history of the job.
*/
public abstract class TaskAttemptInfo {
protected final State state;
protected final TaskInfo taskInfo;
protected final List<List<Integer>> allSplits;
protected TaskAttemptInfo
(State state, TaskInfo taskInfo, List<List<Integer>> allSplits) {
if (state == State.SUCCEEDED || state == State.FAILED) {
this.state = state;
} else {
throw new IllegalArgumentException("status cannot be " + state);
}
this.taskInfo = taskInfo;
this.allSplits = allSplits;
}
protected TaskAttemptInfo
(State state, TaskInfo taskInfo) {
this(state, taskInfo, LoggedTaskAttempt.SplitVectorKind.getNullSplitsVector());
}
/**
* Get the final {@link State} of the task-attempt.
*
* @return the final <code>State</code> of the task-attempt
*/
public State getRunState() {
return state;
}
/**
* Get the total runtime for the task-attempt.
*
* @return the total runtime for the task-attempt
*/
public abstract long getRuntime();
/**
* Get the {@link TaskInfo} for the given task-attempt.
*
* @return the <code>TaskInfo</code> for the given task-attempt
*/
public TaskInfo getTaskInfo() {
return taskInfo;
}
public List<Integer> getSplitVector(LoggedTaskAttempt.SplitVectorKind kind) {
return kind.get(allSplits);
}
}
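// A minimal sketch of how the accessors above are typically consumed; the
// helper name "successfulRuntime" is illustrative.
//
//   static long successfulRuntime(List<TaskAttemptInfo> attempts) {
//     long total = 0;
//     for (TaskAttemptInfo attempt : attempts) {
//       if (attempt.getRunState() == State.SUCCEEDED) {
//         total += attempt.getRuntime();
//       }
//     }
//     return total;
//   }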
| 2,393 | 29.303797 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ZombieCluster.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
/**
* {@link ZombieCluster} rebuilds the cluster topology using the information
* obtained from job history logs.
*/
public class ZombieCluster extends AbstractClusterStory {
private Node root;
/**
* Construct a homogeneous cluster. We assume that the leaves on the topology
* are {@link MachineNode}s, and the parents of {@link MachineNode}s are
* {@link RackNode}s. We also expect all leaf nodes are on the same level.
*
* @param topology
* The network topology.
* @param defaultNode
* The default node setting.
*/
public ZombieCluster(LoggedNetworkTopology topology, MachineNode defaultNode) {
buildCluster(topology, defaultNode);
}
/**
* Construct a homogeneous cluster. We assume that the leaves on the topology
* are {@link MachineNode}s, and the parents of {@link MachineNode}s are
* {@link RackNode}s. We also expect all leaf nodes are on the same level.
*
* @param path Path to the JSON-encoded topology file.
   * @param conf The configuration to use.
* @param defaultNode
* The default node setting.
* @throws IOException
*/
public ZombieCluster(Path path, MachineNode defaultNode, Configuration conf) throws IOException {
this(new ClusterTopologyReader(path, conf).get(), defaultNode);
}
/**
* Construct a homogeneous cluster. We assume that the leaves on the topology
* are {@link MachineNode}s, and the parents of {@link MachineNode}s are
* {@link RackNode}s. We also expect all leaf nodes are on the same level.
*
* @param input The input stream for the JSON-encoded topology file.
* @param defaultNode
* The default node setting.
* @throws IOException
*/
public ZombieCluster(InputStream input, MachineNode defaultNode) throws IOException {
this(new ClusterTopologyReader(input).get(), defaultNode);
}
@Override
public Node getClusterTopology() {
return root;
}
private final void buildCluster(LoggedNetworkTopology topology,
MachineNode defaultNode) {
Map<LoggedNetworkTopology, Integer> levelMapping =
new IdentityHashMap<LoggedNetworkTopology, Integer>();
Deque<LoggedNetworkTopology> unvisited =
new ArrayDeque<LoggedNetworkTopology>();
unvisited.add(topology);
levelMapping.put(topology, 0);
    // build levelMapping and determine leafLevel
int leafLevel = -1; // -1 means leafLevel unknown.
for (LoggedNetworkTopology n = unvisited.poll(); n != null;
n = unvisited.poll()) {
int level = levelMapping.get(n);
List<LoggedNetworkTopology> children = n.getChildren();
if (children == null || children.isEmpty()) {
if (leafLevel == -1) {
leafLevel = level;
} else if (leafLevel != level) {
throw new IllegalArgumentException(
"Leaf nodes are not on the same level");
}
} else {
for (LoggedNetworkTopology child : children) {
levelMapping.put(child, level + 1);
unvisited.addFirst(child);
}
}
}
/**
     * A second-pass DFS traversal of the topology tree. path[i] contains the parent
* of the node at level i+1.
*/
Node[] path = new Node[leafLevel];
unvisited.add(topology);
for (LoggedNetworkTopology n = unvisited.poll(); n != null;
n = unvisited.poll()) {
int level = levelMapping.get(n);
Node current;
if (level == leafLevel) { // a machine node
MachineNode.Builder builder =
new MachineNode.Builder(n.getName().getValue(), level);
if (defaultNode != null) {
builder.cloneFrom(defaultNode);
}
current = builder.build();
} else {
current = (level == leafLevel - 1)
? new RackNode(n.getName().getValue(), level) :
new Node(n.getName().getValue(), level);
path[level] = current;
// Add all children to the front of the queue.
for (LoggedNetworkTopology child : n.getChildren()) {
unvisited.addFirst(child);
}
}
if (level != 0) {
path[level - 1].addChild(current);
}
}
root = path[0];
}
}
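// A minimal construction sketch, assuming a Rumen topology file at a
// hypothetical path "topology.json"; passing null means no default node
// settings are cloned onto the machines.
//
//   Configuration conf = new Configuration();
//   ZombieCluster cluster =
//       new ZombieCluster(new Path("topology.json"), null, conf);
//   Node root = cluster.getClusterTopology();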
| 5,276 | 33.94702 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/TreePath.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
/**
* This describes a path from a node to the root. We use it when we compare two
* trees during rumen unit tests. If the trees are not identical, this chain
* will be converted to a string which describes the path from the root to the
* fields that did not compare.
*
*/
public class TreePath {
final TreePath parent;
final String fieldName;
final int index;
public TreePath(TreePath parent, String fieldName) {
super();
this.parent = parent;
this.fieldName = fieldName;
this.index = -1;
}
public TreePath(TreePath parent, String fieldName, int index) {
super();
this.parent = parent;
this.fieldName = fieldName;
this.index = index;
}
@Override
public String toString() {
String mySegment = fieldName + (index == -1 ? "" : ("[" + index + "]"));
return ((parent == null) ? "" : parent.toString() + "-->") + mySegment;
}
}
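// A minimal sketch of how paths render; the field names are hypothetical.
//
//   TreePath job = new TreePath(null, "job");
//   TreePath attempt = new TreePath(job, "mapTasks", 3);
//   // attempt.toString() yields "job-->mapTasks[3]"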
| 1,746 | 29.12069 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Node.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.Collections;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
/**
* {@link Node} represents a node in the cluster topology. A node can be a
* {@link MachineNode}, or a {@link RackNode}, etc.
*/
public class Node implements Comparable<Node> {
private static final SortedSet<Node> EMPTY_SET =
Collections.unmodifiableSortedSet(new TreeSet<Node>());
private Node parent;
private final String name;
private final int level;
private SortedSet<Node> children;
/**
* @param name
* A unique name to identify a node in the cluster.
* @param level
* The level of the node in the cluster
*/
public Node(String name, int level) {
if (name == null) {
throw new IllegalArgumentException("Node name cannot be null");
}
if (level < 0) {
throw new IllegalArgumentException("Level cannot be negative");
}
this.name = name;
this.level = level;
}
/**
* Get the name of the node.
*
* @return The name of the node.
*/
public String getName() {
return name;
}
/**
* Get the level of the node.
* @return The level of the node.
*/
public int getLevel() {
return level;
}
private void checkChildren() {
if (children == null) {
children = new TreeSet<Node>();
}
}
/**
* Add a child node to this node.
   * @param child The child node to be added. The child node must not
   *          currently belong to another cluster topology.
   * @return Boolean indicating whether the node was successfully added.
*/
public synchronized boolean addChild(Node child) {
if (child.parent != null) {
throw new IllegalArgumentException(
"The child is already under another node:" + child.parent);
}
checkChildren();
boolean retval = children.add(child);
if (retval) child.parent = this;
return retval;
}
/**
* Does this node have any children?
   * @return Boolean indicating whether this node has any children.
*/
public synchronized boolean hasChildren() {
return children != null && !children.isEmpty();
}
/**
* Get the children of this node.
*
* @return The children of this node. If no child, an empty set will be
* returned. The returned set is read-only.
*/
public synchronized Set<Node> getChildren() {
return (children == null) ? EMPTY_SET :
Collections.unmodifiableSortedSet(children);
}
/**
* Get the parent node.
* @return the parent node. If root node, return null.
*/
public Node getParent() {
return parent;
}
@Override
public int hashCode() {
return name.hashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj.getClass() != this.getClass())
return false;
Node other = (Node) obj;
return name.equals(other.name);
}
@Override
public String toString() {
return "(" + name +", " + level +")";
}
@Override
public int compareTo(Node o) {
return name.compareTo(o.name);
}
}
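// A minimal sketch of assembling a two-level topology by hand; the node names
// are hypothetical.
//
//   Node root = new Node("<root>", 0);
//   Node rack = new Node("rack-0", 1);
//   Node host = new Node("host-0.rack-0", 2);
//   root.addChild(rack);
//   rack.addChild(host);
//   // host.getParent() == rack, and root.getChildren() contains only rack.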
| 3,972 | 25.66443 | 120 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ResourceUsageMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
/**
* Captures the resource usage metrics.
*/
public class ResourceUsageMetrics implements Writable, DeepCompare {
private long cumulativeCpuUsage;
private long virtualMemoryUsage;
private long physicalMemoryUsage;
private long heapUsage;
public ResourceUsageMetrics() {
}
/**
* Get the cumulative CPU usage.
*/
public long getCumulativeCpuUsage() {
return cumulativeCpuUsage;
}
/**
* Set the cumulative CPU usage.
*/
public void setCumulativeCpuUsage(long usage) {
cumulativeCpuUsage = usage;
}
/**
* Get the virtual memory usage.
*/
public long getVirtualMemoryUsage() {
return virtualMemoryUsage;
}
/**
* Set the virtual memory usage.
*/
public void setVirtualMemoryUsage(long usage) {
virtualMemoryUsage = usage;
}
/**
* Get the physical memory usage.
*/
public long getPhysicalMemoryUsage() {
return physicalMemoryUsage;
}
/**
* Set the physical memory usage.
*/
public void setPhysicalMemoryUsage(long usage) {
physicalMemoryUsage = usage;
}
/**
* Get the total heap usage.
*/
public long getHeapUsage() {
return heapUsage;
}
/**
* Set the total heap usage.
*/
public void setHeapUsage(long usage) {
heapUsage = usage;
}
/**
* Returns the size of the serialized data
*/
public int size() {
int size = 0;
size += WritableUtils.getVIntSize(cumulativeCpuUsage); // long #1
size += WritableUtils.getVIntSize(virtualMemoryUsage); // long #2
size += WritableUtils.getVIntSize(physicalMemoryUsage); // long #3
size += WritableUtils.getVIntSize(heapUsage); // long #4
return size;
}
@Override
public void readFields(DataInput in) throws IOException {
cumulativeCpuUsage = WritableUtils.readVLong(in); // long #1
virtualMemoryUsage = WritableUtils.readVLong(in); // long #2
physicalMemoryUsage = WritableUtils.readVLong(in); // long #3
heapUsage = WritableUtils.readVLong(in); // long #4
}
@Override
public void write(DataOutput out) throws IOException {
    // TODO: also write the resource usage version number
WritableUtils.writeVLong(out, cumulativeCpuUsage); // long #1
WritableUtils.writeVLong(out, virtualMemoryUsage); // long #2
WritableUtils.writeVLong(out, physicalMemoryUsage); // long #3
WritableUtils.writeVLong(out, heapUsage); // long #4
}
private static void compareMetric(long m1, long m2, TreePath loc)
throws DeepInequalityException {
if (m1 != m2) {
throw new DeepInequalityException("Value miscompared:" + loc.toString(),
loc);
}
}
private static void compareSize(ResourceUsageMetrics m1,
ResourceUsageMetrics m2, TreePath loc)
throws DeepInequalityException {
if (m1.size() != m2.size()) {
throw new DeepInequalityException("Size miscompared: " + loc.toString(),
loc);
}
}
@Override
public void deepCompare(DeepCompare other, TreePath loc)
throws DeepInequalityException {
if (!(other instanceof ResourceUsageMetrics)) {
throw new DeepInequalityException("Comparand has wrong type", loc);
}
ResourceUsageMetrics metrics2 = (ResourceUsageMetrics) other;
compareMetric(getCumulativeCpuUsage(), metrics2.getCumulativeCpuUsage(),
new TreePath(loc, "cumulativeCpu"));
compareMetric(getVirtualMemoryUsage(), metrics2.getVirtualMemoryUsage(),
new TreePath(loc, "virtualMemory"));
compareMetric(getPhysicalMemoryUsage(), metrics2.getPhysicalMemoryUsage(),
new TreePath(loc, "physicalMemory"));
compareMetric(getHeapUsage(), metrics2.getHeapUsage(),
new TreePath(loc, "heapUsage"));
compareSize(this, metrics2, new TreePath(loc, "size"));
}
}
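// A minimal round-trip sketch, assuming Hadoop's DataOutputBuffer and
// DataInputBuffer from org.apache.hadoop.io; the usage values are made up.
//
//   ResourceUsageMetrics metrics = new ResourceUsageMetrics();
//   metrics.setCumulativeCpuUsage(1500);
//   metrics.setHeapUsage(64 * 1024 * 1024);
//   DataOutputBuffer out = new DataOutputBuffer();
//   metrics.write(out);
//   DataInputBuffer in = new DataInputBuffer();
//   in.reset(out.getData(), out.getLength());
//   ResourceUsageMetrics copy = new ResourceUsageMetrics();
//   copy.readFields(in);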
| 4,941 | 29.695652 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobHistoryParserFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.IOException;
import java.io.InputStream;
/**
 * {@link JobHistoryParserFactory} is a static factory that attempts to
 * determine the version of a job history stream and return a suitable parser.
*/
public class JobHistoryParserFactory {
public static JobHistoryParser getParser(RewindableInputStream ris)
throws IOException {
for (VersionDetector vd : VersionDetector.values()) {
boolean canParse = vd.canParse(ris);
ris.rewind();
if (canParse) {
return vd.newInstance(ris);
}
}
throw new IOException("No suitable parser.");
}
public enum VersionDetector {
Hadoop20() {
@Override
public boolean canParse(InputStream input) throws IOException {
return Hadoop20JHParser.canParse(input);
}
@Override
public JobHistoryParser newInstance(InputStream input) throws IOException {
return new Hadoop20JHParser(input);
}
},
Current() {
@Override
public boolean canParse(InputStream input) throws IOException {
return CurrentJHParser.canParse(input);
}
@Override
public JobHistoryParser newInstance(InputStream input) throws IOException {
return new CurrentJHParser(input);
}
};
abstract JobHistoryParser newInstance(InputStream input) throws IOException;
abstract boolean canParse(InputStream input) throws IOException;
}
}
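// A minimal sketch of version-agnostic parsing; "rawHistoryStream" is a
// hypothetical InputStream over a job history file.
//
//   RewindableInputStream ris = new RewindableInputStream(rawHistoryStream);
//   JobHistoryParser parser = JobHistoryParserFactory.getParser(ris);
//   try {
//     for (HistoryEvent e = parser.nextEvent(); e != null;
//          e = parser.nextEvent()) {
//       // handle the event
//     }
//   } finally {
//     parser.close();
//   }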
| 2,264 | 30.027397 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/PossiblyDecompressedInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.Decompressor;
class PossiblyDecompressedInputStream extends InputStream {
private final Decompressor decompressor;
private final InputStream coreInputStream;
public PossiblyDecompressedInputStream(Path inputPath, Configuration conf)
throws IOException {
CompressionCodecFactory codecs = new CompressionCodecFactory(conf);
CompressionCodec inputCodec = codecs.getCodec(inputPath);
FileSystem ifs = inputPath.getFileSystem(conf);
FSDataInputStream fileIn = ifs.open(inputPath);
if (inputCodec == null) {
decompressor = null;
coreInputStream = fileIn;
} else {
decompressor = CodecPool.getDecompressor(inputCodec);
coreInputStream = inputCodec.createInputStream(fileIn, decompressor);
}
}
@Override
public int read() throws IOException {
return coreInputStream.read();
}
@Override
public int read(byte[] buffer, int offset, int length) throws IOException {
return coreInputStream.read(buffer, offset, length);
}
@Override
public void close() throws IOException {
    // coreInputStream.close() is called before returning the decompressor to
    // the pool because coreInputStream.close() could (though currently it
    // doesn't) access the decompressor.
coreInputStream.close();
if (decompressor != null) {
CodecPool.returnDecompressor(decompressor);
}
}
}
| 2,646 | 33.828947 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/CurrentJHParser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.mapreduce.jobhistory.EventReader;
import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
import org.apache.hadoop.mapreduce.v2.hs.JobHistory;
/**
* {@link JobHistoryParser} that parses {@link JobHistory} files produced by
* {@link org.apache.hadoop.mapreduce.v2.hs.JobHistory} in the same source
* code tree as rumen.
*/
public class CurrentJHParser implements JobHistoryParser {
private EventReader reader;
private static class ForkedDataInputStream extends DataInputStream {
ForkedDataInputStream(InputStream input) {
super(input);
}
@Override
public void close() {
// no code
}
}
/**
* Can this parser parse the input?
*
* @param input
* @return Whether this parser can parse the input.
* @throws IOException
*/
public static boolean canParse(InputStream input) throws IOException {
final DataInputStream in = new ForkedDataInputStream(input);
try {
final EventReader reader = new EventReader(in);
try {
reader.getNextEvent();
} catch (IOException e) {
return false;
} finally {
reader.close();
}
} catch (IOException e) {
return false;
}
return true;
}
public CurrentJHParser(InputStream input) throws IOException {
reader = new EventReader(new DataInputStream(input));
}
@Override
public HistoryEvent nextEvent() throws IOException {
return reader.getNextEvent();
}
@Override
public void close() throws IOException {
reader.close();
}
}
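// A minimal sketch of probing before parsing, mirroring the factory usage;
// "ris" is a hypothetical RewindableInputStream over a history file.
//
//   if (CurrentJHParser.canParse(ris)) {
//     ris.rewind();
//     JobHistoryParser parser = new CurrentJHParser(ris);
//     // ... consume events via parser.nextEvent(), then parser.close()
//   }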
| 2,494 | 27.033708 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/TraceBuilder.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.LinkedList;
import java.util.List;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
import org.apache.hadoop.mapreduce.v2.hs.JobHistory;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* The main driver of the Rumen Parser.
*/
public class TraceBuilder extends Configured implements Tool {
static final private Log LOG = LogFactory.getLog(TraceBuilder.class);
static final int RUN_METHOD_FAILED_EXIT_CODE = 3;
TopologyBuilder topologyBuilder = new TopologyBuilder();
Outputter<LoggedJob> traceWriter;
Outputter<LoggedNetworkTopology> topologyWriter;
static class MyOptions {
Class<? extends InputDemuxer> inputDemuxerClass = DefaultInputDemuxer.class;
@SuppressWarnings("unchecked")
Class<? extends Outputter> clazzTraceOutputter = DefaultOutputter.class;
Path traceOutput;
Path topologyOutput;
List<Path> inputs = new LinkedList<Path>();
MyOptions(String[] args, Configuration conf) throws FileNotFoundException,
IOException, ClassNotFoundException {
int switchTop = 0;
// to determine if the input paths should be recursively scanned or not
boolean doRecursiveTraversal = false;
while (args[switchTop].startsWith("-")) {
if (args[switchTop].equalsIgnoreCase("-demuxer")) {
inputDemuxerClass =
Class.forName(args[++switchTop]).asSubclass(InputDemuxer.class);
} else if (args[switchTop].equalsIgnoreCase("-recursive")) {
doRecursiveTraversal = true;
}
++switchTop;
}
traceOutput = new Path(args[0 + switchTop]);
topologyOutput = new Path(args[1 + switchTop]);
for (int i = 2 + switchTop; i < args.length; ++i) {
inputs.addAll(processInputArgument(
args[i], conf, doRecursiveTraversal));
}
}
/**
* Compare the history file names, not the full paths.
* Job history file name format is such that doing lexicographic sort on the
* history file names should result in the order of jobs' submission times.
*/
private static class HistoryLogsComparator
implements Comparator<FileStatus>, Serializable {
@Override
public int compare(FileStatus file1, FileStatus file2) {
return file1.getPath().getName().compareTo(
file2.getPath().getName());
}
}
/**
* Processes the input file/folder argument. If the input is a file,
* then it is directly considered for further processing by TraceBuilder.
* If the input is a folder, then all the history logs in the
* input folder are considered for further processing.
*
* If isRecursive is true, then the input path is recursively scanned
* for job history logs for further processing by TraceBuilder.
*
* NOTE: If the input represents a globbed path, then it is first flattened
* and then the individual paths represented by the globbed input
* path are considered for further processing.
*
* @param input input path, possibly globbed
* @param conf configuration
* @param isRecursive whether to recursively traverse the input paths to
* find history logs
* @return the input history log files' paths
* @throws FileNotFoundException
* @throws IOException
*/
static List<Path> processInputArgument(String input, Configuration conf,
boolean isRecursive) throws FileNotFoundException, IOException {
Path inPath = new Path(input);
FileSystem fs = inPath.getFileSystem(conf);
FileStatus[] inStatuses = fs.globStatus(inPath);
List<Path> inputPaths = new LinkedList<Path>();
if (inStatuses == null || inStatuses.length == 0) {
return inputPaths;
}
for (FileStatus inStatus : inStatuses) {
Path thisPath = inStatus.getPath();
if (inStatus.isDirectory()) {
          // Find the list of files in this path (recursively if the
          // -recursive option is specified).
List<FileStatus> historyLogs = new ArrayList<FileStatus>();
RemoteIterator<LocatedFileStatus> iter =
fs.listFiles(thisPath, isRecursive);
while (iter.hasNext()) {
LocatedFileStatus child = iter.next();
String fileName = child.getPath().getName();
if (!(fileName.endsWith(".crc") || fileName.startsWith("."))) {
historyLogs.add(child);
}
}
if (historyLogs.size() > 0) {
// Add the sorted history log file names in this path to the
// inputPaths list
FileStatus[] sortableNames =
historyLogs.toArray(new FileStatus[historyLogs.size()]);
Arrays.sort(sortableNames, new HistoryLogsComparator());
for (FileStatus historyLog : sortableNames) {
inputPaths.add(historyLog.getPath());
}
}
} else {
inputPaths.add(thisPath);
}
}
return inputPaths;
}
}
public static void main(String[] args) {
TraceBuilder builder = new TraceBuilder();
int result = RUN_METHOD_FAILED_EXIT_CODE;
try {
result = ToolRunner.run(builder, args);
} catch (Throwable t) {
t.printStackTrace(System.err);
} finally {
try {
builder.finish();
} finally {
if (result == 0) {
return;
}
System.exit(result);
}
}
}
@SuppressWarnings("unchecked")
@Override
public int run(String[] args) throws Exception {
MyOptions options = new MyOptions(args, getConf());
traceWriter = options.clazzTraceOutputter.newInstance();
traceWriter.init(options.traceOutput, getConf());
topologyWriter = new DefaultOutputter<LoggedNetworkTopology>();
topologyWriter.init(options.topologyOutput, getConf());
try {
JobBuilder jobBuilder = null;
for (Path p : options.inputs) {
InputDemuxer inputDemuxer = options.inputDemuxerClass.newInstance();
try {
inputDemuxer.bindTo(p, getConf());
} catch (IOException e) {
LOG.warn("Unable to bind Path " + p + " . Skipping...", e);
continue;
}
Pair<String, InputStream> filePair = null;
try {
while ((filePair = inputDemuxer.getNext()) != null) {
RewindableInputStream ris =
new RewindableInputStream(filePair.second());
JobHistoryParser parser = null;
try {
String jobID = JobHistoryUtils.extractJobID(filePair.first());
if (jobID == null) {
LOG.warn("File skipped: Invalid file name: "
+ filePair.first());
continue;
}
if ((jobBuilder == null)
|| (!jobBuilder.getJobID().equals(jobID))) {
if (jobBuilder != null) {
traceWriter.output(jobBuilder.build());
}
jobBuilder = new JobBuilder(jobID);
}
if (JobHistoryUtils.isJobConfXml(filePair.first())) {
processJobConf(JobConfigurationParser.parse(ris.rewind()),
jobBuilder);
} else {
parser = JobHistoryParserFactory.getParser(ris);
if (parser == null) {
LOG.warn("File skipped: Cannot find suitable parser: "
+ filePair.first());
} else {
processJobHistory(parser, jobBuilder);
}
}
} finally {
if (parser == null) {
ris.close();
} else {
parser.close();
parser = null;
}
}
}
} catch (Throwable t) {
if (filePair != null) {
LOG.warn("TraceBuilder got an error while processing the [possibly virtual] file "
+ filePair.first() + " within Path " + p , t);
}
} finally {
inputDemuxer.close();
}
}
if (jobBuilder != null) {
traceWriter.output(jobBuilder.build());
jobBuilder = null;
} else {
LOG.warn("No job found in traces: ");
}
topologyWriter.output(topologyBuilder.build());
} finally {
traceWriter.close();
topologyWriter.close();
}
return 0;
}
private void processJobConf(Properties properties, JobBuilder jobBuilder) {
jobBuilder.process(properties);
topologyBuilder.process(properties);
}
void processJobHistory(JobHistoryParser parser, JobBuilder jobBuilder)
throws IOException {
HistoryEvent e;
while ((e = parser.nextEvent()) != null) {
jobBuilder.process(e);
topologyBuilder.process(e);
}
parser.close();
}
void finish() {
IOUtils.cleanup(LOG, traceWriter, topologyWriter);
}
}
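// The command-line shape implied by MyOptions above (all paths hypothetical);
// one way to launch the tool is via the hadoop script's CLASSNAME form:
//
//   hadoop org.apache.hadoop.tools.rumen.TraceBuilder \
//       [-demuxer <InputDemuxer class>] [-recursive] \
//       <trace output> <topology output> <job history input>...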
| 10,591 | 32.518987 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/TopologyBuilder.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.Set;
import java.util.HashSet;
import java.util.Properties;
import java.util.StringTokenizer;
import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptFinishedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletionEvent;
import org.apache.hadoop.mapreduce.jobhistory.MapAttemptFinishedEvent;
import org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinishedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskStartedEvent;
/**
* Building the cluster topology.
*/
public class TopologyBuilder {
private Set<ParsedHost> allHosts = new HashSet<ParsedHost>();
/**
* Process one {@link HistoryEvent}
*
* @param event
* The {@link HistoryEvent} to be processed.
*/
public void process(HistoryEvent event) {
if (event instanceof TaskAttemptFinishedEvent) {
processTaskAttemptFinishedEvent((TaskAttemptFinishedEvent) event);
} else if (event instanceof TaskAttemptUnsuccessfulCompletionEvent) {
processTaskAttemptUnsuccessfulCompletionEvent((TaskAttemptUnsuccessfulCompletionEvent) event);
} else if (event instanceof TaskStartedEvent) {
processTaskStartedEvent((TaskStartedEvent) event);
} else if (event instanceof MapAttemptFinishedEvent) {
processMapAttemptFinishedEvent((MapAttemptFinishedEvent) event);
} else if (event instanceof ReduceAttemptFinishedEvent) {
processReduceAttemptFinishedEvent((ReduceAttemptFinishedEvent) event);
}
// I do NOT expect these if statements to be exhaustive.
}
/**
* Process a collection of JobConf {@link Properties}. We do not restrict it
* to be called once.
*
* @param conf
* The job conf properties to be added.
*/
public void process(Properties conf) {
// no code
}
/**
* Request the builder to build the final object. Once called, the
* {@link TopologyBuilder} would accept no more events or job-conf properties.
*
* @return Parsed {@link LoggedNetworkTopology} object.
*/
public LoggedNetworkTopology build() {
return new LoggedNetworkTopology(allHosts);
}
private void processTaskStartedEvent(TaskStartedEvent event) {
preferredLocationForSplits(event.getSplitLocations());
}
private void processTaskAttemptUnsuccessfulCompletionEvent(
TaskAttemptUnsuccessfulCompletionEvent event) {
recordParsedHost(event.getHostname(), event.getRackName());
}
private void processTaskAttemptFinishedEvent(TaskAttemptFinishedEvent event) {
recordParsedHost(event.getHostname(), event.getRackName());
}
private void processMapAttemptFinishedEvent(MapAttemptFinishedEvent event) {
recordParsedHost(event.getHostname(), event.getRackName());
}
private void processReduceAttemptFinishedEvent(ReduceAttemptFinishedEvent event) {
recordParsedHost(event.getHostname(), event.getRackName());
}
private void recordParsedHost(String hostName, String rackName) {
if (hostName == null) {
return;
}
ParsedHost result = null;
if (rackName == null) {
result = ParsedHost.parse(hostName);
} else {
result = new ParsedHost(rackName, hostName);
}
if (result != null && !allHosts.contains(result)) {
allHosts.add(result);
}
}
private void recordParsedHost(String nodeName) {
ParsedHost result = ParsedHost.parse(nodeName);
if (result != null && !allHosts.contains(result)) {
allHosts.add(result);
}
}
private void preferredLocationForSplits(String splits) {
if (splits != null) {
StringTokenizer tok = new StringTokenizer(splits, ",", false);
while (tok.hasMoreTokens()) {
String nextSplit = tok.nextToken();
recordParsedHost(nextSplit);
}
}
}
}
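// A minimal sketch of feeding a builder from any JobHistoryParser; "parser"
// is hypothetical and assumed to be already open.
//
//   TopologyBuilder builder = new TopologyBuilder();
//   HistoryEvent e;
//   while ((e = parser.nextEvent()) != null) {
//     builder.process(e);
//   }
//   LoggedNetworkTopology topology = builder.build();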
| 4,667 | 32.582734 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ReduceTaskAttemptInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.List;
import org.apache.hadoop.mapred.TaskStatus.State;
/**
* {@link ReduceTaskAttemptInfo} represents the information with regard to a
* reduce task attempt.
*/
public class ReduceTaskAttemptInfo extends TaskAttemptInfo {
private long shuffleTime;
private long mergeTime;
private long reduceTime;
public ReduceTaskAttemptInfo(State state, TaskInfo taskInfo, long shuffleTime,
long mergeTime, long reduceTime, List<List<Integer>> allSplits) {
super(state, taskInfo,
allSplits == null
? LoggedTaskAttempt.SplitVectorKind.getNullSplitsVector()
: allSplits);
this.shuffleTime = shuffleTime;
this.mergeTime = mergeTime;
this.reduceTime = reduceTime;
}
/**
*
* @deprecated please use the constructor with
   * {@code (state, taskInfo, shuffleTime, mergeTime, reduceTime,
   * List<List<Integer>> allSplits)}
* instead.
*
* see {@link LoggedTaskAttempt} for an explanation of
* {@code allSplits}.
*
* If there are no known splits, use {@code null}.
*/
@Deprecated
public ReduceTaskAttemptInfo(State state, TaskInfo taskInfo, long shuffleTime,
long mergeTime, long reduceTime) {
this(state, taskInfo, shuffleTime, mergeTime, reduceTime, null);
}
/**
* Get the runtime for the <b>reduce</b> phase of the reduce task-attempt.
*
* @return the runtime for the <b>reduce</b> phase of the reduce task-attempt
*/
public long getReduceRuntime() {
return reduceTime;
}
/**
* Get the runtime for the <b>shuffle</b> phase of the reduce task-attempt.
*
* @return the runtime for the <b>shuffle</b> phase of the reduce task-attempt
*/
public long getShuffleRuntime() {
return shuffleTime;
}
/**
* Get the runtime for the <b>merge</b> phase of the reduce task-attempt
*
* @return the runtime for the <b>merge</b> phase of the reduce task-attempt
*/
public long getMergeRuntime() {
return mergeTime;
}
@Override
public long getRuntime() {
return (getShuffleRuntime() + getMergeRuntime() + getReduceRuntime());
}
}
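// A minimal construction sketch; the timings are made-up milliseconds and
// "taskInfo" is a hypothetical TaskInfo instance.
//
//   ReduceTaskAttemptInfo attempt = new ReduceTaskAttemptInfo(
//       State.SUCCEEDED, taskInfo, 4000L, 1000L, 7000L, null);
//   long runtime = attempt.getRuntime();   // 12000 = shuffle + merge + reduce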
| 3,009 | 30.684211 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/SingleEventEmitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
abstract class SingleEventEmitter {
abstract HistoryEvent maybeEmitEvent(ParsedLine line, String name,
HistoryEventEmitter that);
}
| 1,048 | 39.346154 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedSingleRelativeRanking.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.Set;
import java.util.TreeSet;
import org.codehaus.jackson.annotate.JsonAnySetter;
/**
* A {@link LoggedSingleRelativeRanking} represents an X-Y coordinate of a
* single point in a discrete CDF.
*
* All of the public methods are simply accessors for the instance variables we
* want to write out in the JSON files.
*
*/
public class LoggedSingleRelativeRanking implements DeepCompare {
/**
* The Y coordinate, as a fraction {@code ( 0.0D, 1.0D )}. The default value
* is there to mark an unfilled-in value.
*/
double relativeRanking = -1.0D;
/**
* The X coordinate
*/
long datum = -1L;
static private Set<String> alreadySeenAnySetterAttributes =
new TreeSet<String>();
@SuppressWarnings("unused")
// for input parameter ignored.
@JsonAnySetter
public void setUnknownAttribute(String attributeName, Object ignored) {
if (!alreadySeenAnySetterAttributes.contains(attributeName)) {
alreadySeenAnySetterAttributes.add(attributeName);
System.err.println("In LoggedJob, we saw the unknown attribute "
+ attributeName + ".");
}
}
public double getRelativeRanking() {
return relativeRanking;
}
void setRelativeRanking(double relativeRanking) {
this.relativeRanking = relativeRanking;
}
public long getDatum() {
return datum;
}
void setDatum(long datum) {
this.datum = datum;
}
private void compare1(long c1, long c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 != c2) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
}
private void compare1(double c1, double c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 != c2) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
}
public void deepCompare(DeepCompare comparand, TreePath loc)
throws DeepInequalityException {
if (!(comparand instanceof LoggedSingleRelativeRanking)) {
throw new DeepInequalityException("comparand has wrong type", loc);
}
LoggedSingleRelativeRanking other = (LoggedSingleRelativeRanking) comparand;
compare1(relativeRanking, other.relativeRanking, loc, "relativeRanking");
compare1(datum, other.datum, loc, "datum");
}
}
| 3,214 | 30.519608 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ReduceAttempt20LineHistoryEventEmitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.text.ParseException;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinishedEvent;
public class ReduceAttempt20LineHistoryEventEmitter
extends TaskAttempt20LineEventEmitter {
static List<SingleEventEmitter> nonFinals =
new LinkedList<SingleEventEmitter>();
static List<SingleEventEmitter> finals = new LinkedList<SingleEventEmitter>();
static {
nonFinals.addAll(taskEventNonFinalSEEs);
finals.add(new ReduceAttemptFinishedEventEmitter());
}
ReduceAttempt20LineHistoryEventEmitter() {
super();
}
static private class ReduceAttemptFinishedEventEmitter extends
SingleEventEmitter {
HistoryEvent maybeEmitEvent(ParsedLine line, String taskAttemptIDName,
HistoryEventEmitter thatg) {
if (taskAttemptIDName == null) {
return null;
}
TaskAttemptID taskAttemptID = TaskAttemptID.forName(taskAttemptIDName);
String finishTime = line.get("FINISH_TIME");
String status = line.get("TASK_STATUS");
if (finishTime != null && status != null
&& status.equalsIgnoreCase("success")) {
String hostName = line.get("HOSTNAME");
String counters = line.get("COUNTERS");
String state = line.get("STATE_STRING");
String shuffleFinish = line.get("SHUFFLE_FINISHED");
String sortFinish = line.get("SORT_FINISHED");
if (shuffleFinish != null && sortFinish != null
&& "success".equalsIgnoreCase(status)) {
ReduceAttempt20LineHistoryEventEmitter that =
(ReduceAttempt20LineHistoryEventEmitter) thatg;
return new ReduceAttemptFinishedEvent
(taskAttemptID,
that.originalTaskType, status,
Long.parseLong(shuffleFinish),
Long.parseLong(sortFinish),
Long.parseLong(finishTime),
hostName, -1, null,
state, maybeParseCounters(counters),
null);
}
}
return null;
}
}
@Override
List<SingleEventEmitter> finalSEEs() {
return finals;
}
@Override
List<SingleEventEmitter> nonFinalSEEs() {
return nonFinals;
}
}
| 3,262 | 31.306931 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ParsedLine.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
class ParsedLine {
Properties content;
LogRecordType type;
static final String KEY = "(\\w+)";
/**
* The value string is enclosed in double quotation marks ('"') and
* occurrences of '"' and '\' are escaped with a '\'. So the escaped value
* string is essentially a string of escaped sequence ('\' followed by any
* character) or any character other than '"' and '\'.
*
* The straightforward REGEX to capture the above is "((?:[^\"\\\\]|\\\\.)*)".
   * Unfortunately Java's REGEX implementation is "broken" in that it does not
   * perform the NFA-to-DFA conversion, so such expressions can lead to
   * backtracking and stack overflow when matching long strings. The
* following is a manual "unfolding" of the REGEX to get rid of backtracking.
*/
static final String VALUE = "([^\"\\\\]*+(?:\\\\.[^\"\\\\]*+)*+)";
/**
* REGEX to match the Key-Value pairs in an input line. Capture group 1
* matches the key and capture group 2 matches the value (without quotation
* marks).
*/
static final Pattern keyValPair = Pattern.compile(KEY + "=" + "\"" + VALUE + "\"");
@SuppressWarnings("unused")
ParsedLine(String fullLine, int version) {
super();
content = new Properties();
int firstSpace = fullLine.indexOf(" ");
if (firstSpace < 0) {
firstSpace = fullLine.length();
}
if (firstSpace == 0) {
return; // This is a junk line of some sort
}
type = LogRecordType.intern(fullLine.substring(0, firstSpace));
String propValPairs = fullLine.substring(firstSpace + 1);
Matcher matcher = keyValPair.matcher(propValPairs);
while(matcher.find()){
String key = matcher.group(1);
String value = matcher.group(2);
content.setProperty(key, value);
}
}
protected LogRecordType getType() {
return type;
}
protected String get(String key) {
return content.getProperty(key);
}
protected long getLong(String key) {
String val = get(key);
return Long.parseLong(val);
}
}
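// A minimal parsing sketch; the line text and version number follow the 0.20
// history format handled by Hadoop20JHParser, and the IDs are made up.
//
//   ParsedLine line =
//       new ParsedLine("Task TASKID=\"task_0001\" TASK_TYPE=\"MAP\" .", 1);
//   String id = line.get("TASKID");        // "task_0001"
//   LogRecordType type = line.getType();   // interned "Task"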
| 2,968 | 31.271739 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Hadoop20JHParser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
import org.apache.hadoop.util.LineReader;
/**
* {@link JobHistoryParser} to parse job histories for hadoop 0.20 (META=1).
*/
public class Hadoop20JHParser implements JobHistoryParser {
final LineReader reader;
static final String endLineString = " .";
static final int internalVersion = 1;
/**
* Can this parser parse the input?
*
* @param input
* @return Whether this parser can parse the input.
* @throws IOException
*
* We will deem a stream to be a good 0.20 job history stream if the
* first line is exactly "Meta VERSION=\"1\" ."
*/
public static boolean canParse(InputStream input) throws IOException {
try {
LineReader reader = new LineReader(input);
Text buffer = new Text();
return reader.readLine(buffer) != 0
&& buffer.toString().equals("Meta VERSION=\"1\" .");
} catch (EOFException e) {
return false;
}
}
public Hadoop20JHParser(InputStream input) throws IOException {
super();
reader = new LineReader(input);
}
public Hadoop20JHParser(LineReader reader) throws IOException {
super();
this.reader = reader;
}
Map<String, HistoryEventEmitter> liveEmitters =
new HashMap<String, HistoryEventEmitter>();
Queue<HistoryEvent> remainingEvents = new LinkedList<HistoryEvent>();
enum LineType {
JOB("Job", "JOBID") {
HistoryEventEmitter createEmitter() {
return new Job20LineHistoryEventEmitter();
}
},
TASK("Task", "TASKID") {
HistoryEventEmitter createEmitter() {
return new Task20LineHistoryEventEmitter();
}
},
MAP_ATTEMPT("MapAttempt", "TASK_ATTEMPT_ID") {
HistoryEventEmitter createEmitter() {
return new MapAttempt20LineHistoryEventEmitter();
}
},
REDUCE_ATTEMPT("ReduceAttempt", "TASK_ATTEMPT_ID") {
HistoryEventEmitter createEmitter() {
return new ReduceAttempt20LineHistoryEventEmitter();
}
};
private LogRecordType type;
private String name;
LineType(String s, String name) {
type = LogRecordType.intern(s);
this.name = name;
}
LogRecordType recordType() {
return type;
}
String getName(ParsedLine line) {
return line.get(name);
}
abstract HistoryEventEmitter createEmitter();
static LineType findLineType(LogRecordType lrt) {
for (LineType lt : LineType.values()) {
if (lt.type == lrt) {
return lt;
}
}
return null;
}
}
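  // Illustrative note (not part of the original source): a record line that
  // begins with "Task" resolves as
  //   LineType.findLineType(LogRecordType.intern("Task")) == LineType.TASK
  // and its emitter is a Task20LineHistoryEventEmitter keyed by the line's
  // "TASKID" value.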
@Override
public HistoryEvent nextEvent() {
try {
while (remainingEvents.isEmpty()) {
ParsedLine line = new ParsedLine(getFullLine(), internalVersion);
LineType type = LineType.findLineType(line.getType());
if (type == null) {
continue;
}
String name = type.getName(line);
HistoryEventEmitter emitter = findOrMakeEmitter(name, type);
Pair<Queue<HistoryEvent>, HistoryEventEmitter.PostEmitAction> pair =
emitter.emitterCore(line, name);
if (pair.second() == HistoryEventEmitter.PostEmitAction.REMOVE_HEE) {
liveEmitters.remove(name);
}
remainingEvents = pair.first();
}
return remainingEvents.poll();
} catch (EOFException e) {
return null;
} catch (IOException e) {
return null;
}
}
HistoryEventEmitter findOrMakeEmitter(String name, LineType type) {
HistoryEventEmitter result = liveEmitters.get(name);
if (result == null) {
result = type.createEmitter();
liveEmitters.put(name, result);
}
return result;
}
private String getOneLine() throws IOException {
Text resultText = new Text();
if (reader.readLine(resultText) == 0) {
throw new EOFException("apparent bad line");
}
return resultText.toString();
}
private String getFullLine() throws IOException {
String line = getOneLine();
while (line.length() < endLineString.length()) {
line = getOneLine();
}
if (line.endsWith(endLineString)) {
return line;
}
StringBuilder sb = new StringBuilder(line);
String addedLine;
do {
addedLine = getOneLine();
sb.append("\n");
sb.append(addedLine);
} while (addedLine.length() < endLineString.length()
|| !endLineString.equals(addedLine.substring(addedLine.length()
- endLineString.length())));
return sb.toString();
}
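  // Illustrative example (not part of the original source): a record that the
  // history writer wrapped across two physical lines, e.g.
  //   Job JOBID="job_200904211745_0002" JOB_STATUS="SUCCESS"
  //   COUNTERS="{...}" .
  // is returned as one string with the pieces joined by "\n", because only
  // the final physical line ends with the " ." terminator.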
@Override
public void close() throws IOException {
if (reader != null) {
reader.close();
}
}
}
| 5,711 | 25.943396 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package org.apache.hadoop.tools.rumen;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.TreeSet;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.tools.rumen.datatypes.*;
import org.codehaus.jackson.annotate.JsonAnySetter;
/**
* A {@link LoggedDiscreteCDF} is a representation of an hadoop job, with the
* details of this class set up to meet the requirements of the Jackson JSON
* parser/generator.
*
* All of the public methods are simply accessors for the instance variables we
* want to write out in the JSON files.
*
*/
public class LoggedJob implements DeepCompare {
public enum JobType {
JAVA, PIG, STREAMING, PIPES, OVERALL
};
public enum JobPriority {
VERY_LOW, LOW, NORMAL, HIGH, VERY_HIGH
};
static private Set<String> alreadySeenAnySetterAttributes =
new TreeSet<String>();
JobID jobID;
UserName user;
long computonsPerMapInputByte = -1L;
long computonsPerMapOutputByte = -1L;
long computonsPerReduceInputByte = -1L;
long computonsPerReduceOutputByte = -1L;
long submitTime = -1L;
long launchTime = -1L;
long finishTime = -1L;
int heapMegabytes = -1;
int totalMaps = -1;
int totalReduces = -1;
Pre21JobHistoryConstants.Values outcome = null;
JobType jobtype = JobType.JAVA;
JobPriority priority = JobPriority.NORMAL;
List<String> directDependantJobs = new ArrayList<String>();
List<LoggedTask> mapTasks = new ArrayList<LoggedTask>();
List<LoggedTask> reduceTasks = new ArrayList<LoggedTask>();
List<LoggedTask> otherTasks = new ArrayList<LoggedTask>();
// There are CDFs for each level of locality -- most local first
ArrayList<LoggedDiscreteCDF> successfulMapAttemptCDFs;
// There are CDFs for each level of locality -- most local first
ArrayList<LoggedDiscreteCDF> failedMapAttemptCDFs;
LoggedDiscreteCDF successfulReduceAttemptCDF;
LoggedDiscreteCDF failedReduceAttemptCDF;
QueueName queue = null;
JobName jobName = null;
int clusterMapMB = -1;
int clusterReduceMB = -1;
int jobMapMB = -1;
int jobReduceMB = -1;
long relativeTime = 0;
double[] mapperTriesToSucceed;
double failedMapperFraction; // !!!!!
private JobProperties jobProperties = new JobProperties();
LoggedJob() {
}
LoggedJob(String jobID) {
super();
setJobID(jobID);
}
/**
* Set the configuration properties of the job.
*/
void setJobProperties(Properties conf) {
this.jobProperties = new JobProperties(conf);
}
/**
* Get the configuration properties of the job.
*/
public JobProperties getJobProperties() {
return jobProperties;
}
void adjustTimes(long adjustment) {
submitTime += adjustment;
launchTime += adjustment;
finishTime += adjustment;
for (LoggedTask task : mapTasks) {
task.adjustTimes(adjustment);
}
for (LoggedTask task : reduceTasks) {
task.adjustTimes(adjustment);
}
for (LoggedTask task : otherTasks) {
task.adjustTimes(adjustment);
}
}
// for input parameter ignored.
@JsonAnySetter
public void setUnknownAttribute(String attributeName, Object ignored) {
if (!alreadySeenAnySetterAttributes.contains(attributeName)) {
alreadySeenAnySetterAttributes.add(attributeName);
System.err.println("In LoggedJob, we saw the unknown attribute "
+ attributeName + ".");
}
}
public UserName getUser() {
return user;
}
void setUser(String user) {
this.user = new UserName(user);
}
public JobID getJobID() {
return jobID;
}
void setJobID(String jobID) {
this.jobID = JobID.forName(jobID);
}
public JobPriority getPriority() {
return priority;
}
void setPriority(JobPriority priority) {
this.priority = priority;
}
public long getComputonsPerMapInputByte() {
return computonsPerMapInputByte;
}
void setComputonsPerMapInputByte(long computonsPerMapInputByte) {
this.computonsPerMapInputByte = computonsPerMapInputByte;
}
public long getComputonsPerMapOutputByte() {
return computonsPerMapOutputByte;
}
void setComputonsPerMapOutputByte(long computonsPerMapOutputByte) {
this.computonsPerMapOutputByte = computonsPerMapOutputByte;
}
public long getComputonsPerReduceInputByte() {
return computonsPerReduceInputByte;
}
void setComputonsPerReduceInputByte(long computonsPerReduceInputByte) {
this.computonsPerReduceInputByte = computonsPerReduceInputByte;
}
public long getComputonsPerReduceOutputByte() {
return computonsPerReduceOutputByte;
}
void setComputonsPerReduceOutputByte(long computonsPerReduceOutputByte) {
this.computonsPerReduceOutputByte = computonsPerReduceOutputByte; // !!!!!
}
public long getSubmitTime() {
return submitTime;
}
void setSubmitTime(long submitTime) {
this.submitTime = submitTime;
}
public long getLaunchTime() {
return launchTime;
}
void setLaunchTime(long startTime) {
this.launchTime = startTime;
}
public long getFinishTime() {
return finishTime;
}
void setFinishTime(long finishTime) {
this.finishTime = finishTime;
}
public int getHeapMegabytes() {
return heapMegabytes;
}
void setHeapMegabytes(int heapMegabytes) {
this.heapMegabytes = heapMegabytes;
}
public int getTotalMaps() {
return totalMaps;
}
void setTotalMaps(int totalMaps) {
this.totalMaps = totalMaps;
}
public int getTotalReduces() {
return totalReduces;
}
void setTotalReduces(int totalReduces) {
this.totalReduces = totalReduces;
}
public Pre21JobHistoryConstants.Values getOutcome() {
return outcome;
}
void setOutcome(Pre21JobHistoryConstants.Values outcome) {
this.outcome = outcome;
}
public JobType getJobtype() {
return jobtype;
}
void setJobtype(JobType jobtype) {
this.jobtype = jobtype;
}
public List<String> getDirectDependantJobs() {
return directDependantJobs;
}
void setDirectDependantJobs(List<String> directDependantJobs) {
this.directDependantJobs = directDependantJobs;
}
public List<LoggedTask> getMapTasks() {
return mapTasks;
}
void setMapTasks(List<LoggedTask> mapTasks) {
this.mapTasks = mapTasks;
}
public List<LoggedTask> getReduceTasks() {
return reduceTasks;
}
void setReduceTasks(List<LoggedTask> reduceTasks) {
this.reduceTasks = reduceTasks;
}
public List<LoggedTask> getOtherTasks() {
return otherTasks;
}
void setOtherTasks(List<LoggedTask> otherTasks) {
this.otherTasks = otherTasks;
}
public ArrayList<LoggedDiscreteCDF> getSuccessfulMapAttemptCDFs() {
return successfulMapAttemptCDFs;
}
void setSuccessfulMapAttemptCDFs(
ArrayList<LoggedDiscreteCDF> successfulMapAttemptCDFs) {
this.successfulMapAttemptCDFs = successfulMapAttemptCDFs;
}
public ArrayList<LoggedDiscreteCDF> getFailedMapAttemptCDFs() {
return failedMapAttemptCDFs;
}
void setFailedMapAttemptCDFs(ArrayList<LoggedDiscreteCDF> failedMapAttemptCDFs) {
this.failedMapAttemptCDFs = failedMapAttemptCDFs;
}
public LoggedDiscreteCDF getSuccessfulReduceAttemptCDF() {
return successfulReduceAttemptCDF;
}
void setSuccessfulReduceAttemptCDF(
LoggedDiscreteCDF successfulReduceAttemptCDF) {
this.successfulReduceAttemptCDF = successfulReduceAttemptCDF;
}
public LoggedDiscreteCDF getFailedReduceAttemptCDF() {
return failedReduceAttemptCDF;
}
void setFailedReduceAttemptCDF(LoggedDiscreteCDF failedReduceAttemptCDF) {
this.failedReduceAttemptCDF = failedReduceAttemptCDF;
}
public double[] getMapperTriesToSucceed() {
return mapperTriesToSucceed;
}
void setMapperTriesToSucceed(double[] mapperTriesToSucceed) {
this.mapperTriesToSucceed = mapperTriesToSucceed;
}
public double getFailedMapperFraction() {
return failedMapperFraction;
}
void setFailedMapperFraction(double failedMapperFraction) {
this.failedMapperFraction = failedMapperFraction;
}
public long getRelativeTime() {
return relativeTime;
}
void setRelativeTime(long relativeTime) {
this.relativeTime = relativeTime;
}
/**
* @return job queue name if it is available in job history file or
* job history conf file. Returns null otherwise.
*/
public QueueName getQueue() {
return queue;
}
void setQueue(String queue) {
this.queue = new QueueName(queue);
}
public JobName getJobName() {
return jobName;
}
void setJobName(String jobName) {
this.jobName = new JobName(jobName);
}
public int getClusterMapMB() {
return clusterMapMB;
}
void setClusterMapMB(int clusterMapMB) {
this.clusterMapMB = clusterMapMB;
}
public int getClusterReduceMB() {
return clusterReduceMB;
}
void setClusterReduceMB(int clusterReduceMB) {
this.clusterReduceMB = clusterReduceMB;
}
public int getJobMapMB() {
return jobMapMB;
}
void setJobMapMB(int jobMapMB) {
this.jobMapMB = jobMapMB;
}
public int getJobReduceMB() {
return jobReduceMB;
}
void setJobReduceMB(int jobReduceMB) {
this.jobReduceMB = jobReduceMB;
}
private void compare1(String c1, String c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
if (c1 == null || c2 == null || !c1.equals(c2)) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
}
private void compare1(long c1, long c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 != c2) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
}
private void compare1(Pre21JobHistoryConstants.Values c1,
Pre21JobHistoryConstants.Values c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 != c2) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
}
private void compare1(JobType c1, JobType c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 != c2) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
}
private void compare1(JobPriority c1, JobPriority c2, TreePath loc,
String eltname) throws DeepInequalityException {
if (c1 != c2) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
}
private void compare1(int c1, int c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 != c2) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
}
private void compare1(double c1, double c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 != c2) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
}
private void compare1(double[] c1, double[] c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
TreePath recursePath = new TreePath(loc, eltname);
if (c1 == null || c2 == null || c1.length != c2.length) {
throw new DeepInequalityException(eltname + " miscompared", recursePath);
}
for (int i = 0; i < c1.length; ++i) {
if (c1[i] != c2[i]) {
throw new DeepInequalityException(eltname + " miscompared",
new TreePath(loc, eltname, i));
}
}
}
private void compare1(DeepCompare c1, DeepCompare c2, TreePath loc,
String eltname, int index) throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
TreePath recursePath = new TreePath(loc, eltname, index);
if (c1 == null || c2 == null) {
if (index == -1) {
throw new DeepInequalityException(eltname + " miscompared", recursePath);
} else {
throw new DeepInequalityException(eltname + "[" + index
+ "] miscompared", recursePath);
}
}
c1.deepCompare(c2, recursePath);
}
// I'll treat this as an atomic object type
private void compareStrings(List<String> c1, List<String> c2, TreePath loc,
String eltname) throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
TreePath recursePath = new TreePath(loc, eltname);
if (c1 == null || c2 == null || !c1.equals(c2)) {
throw new DeepInequalityException(eltname + " miscompared", recursePath);
}
}
private void compareLoggedTasks(List<LoggedTask> c1, List<LoggedTask> c2,
TreePath loc, String eltname) throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
if (c1 == null || c2 == null || c1.size() != c2.size()) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
for (int i = 0; i < c1.size(); ++i) {
c1.get(i).deepCompare(c2.get(i), new TreePath(loc, eltname, i));
}
}
private void compareCDFs(List<LoggedDiscreteCDF> c1,
List<LoggedDiscreteCDF> c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
if (c1 == null || c2 == null || c1.size() != c2.size()) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
for (int i = 0; i < c1.size(); ++i) {
c1.get(i).deepCompare(c2.get(i), new TreePath(loc, eltname, i));
}
}
private void compareJobProperties(JobProperties jprop1, JobProperties jprop2,
TreePath loc, String eltname)
throws DeepInequalityException {
if (jprop1 == null && jprop2 == null) {
return;
}
if (jprop1 == null || jprop2 == null) {
throw new DeepInequalityException(eltname + " miscompared",
new TreePath(loc, eltname));
}
Properties prop1 = jprop1.getValue();
Properties prop2 = jprop2.getValue();
if (prop1.size() != prop2.size()) {
throw new DeepInequalityException(eltname + " miscompared [size]",
new TreePath(loc, eltname));
}
for (Map.Entry<Object, Object> entry : prop1.entrySet()) {
String v1 = entry.getValue().toString();
String v2 = prop2.get(entry.getKey()).toString();
compare1(v1, v2, new TreePath(loc, eltname), "key:" + entry.getKey());
}
}
private void compare1(DataType<String> c1, DataType<String> c2, TreePath loc,
String eltname)
throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
if (c1 == null || c2 == null) {
throw new DeepInequalityException(eltname + " miscompared",
new TreePath(loc, eltname));
}
TreePath dtPath = new TreePath(loc, eltname);
if (!c1.getClass().getName().equals(c2.getClass().getName())) {
throw new DeepInequalityException(eltname + " miscompared",
new TreePath(dtPath, "class"));
}
compare1(c1.getValue(), c2.getValue(), dtPath, "value");
}
public void deepCompare(DeepCompare comparand, TreePath loc)
throws DeepInequalityException {
if (!(comparand instanceof LoggedJob)) {
throw new DeepInequalityException("comparand has wrong type", loc);
}
LoggedJob other = (LoggedJob) comparand;
compare1(jobID.toString(), other.jobID.toString(), loc, "jobID");
compare1(user, other.user, loc, "user");
compare1(computonsPerMapInputByte, other.computonsPerMapInputByte, loc,
"computonsPerMapInputByte");
compare1(computonsPerMapOutputByte, other.computonsPerMapOutputByte, loc,
"computonsPerMapOutputByte");
compare1(computonsPerReduceInputByte, other.computonsPerReduceInputByte,
loc, "computonsPerReduceInputByte");
compare1(computonsPerReduceOutputByte, other.computonsPerReduceOutputByte,
loc, "computonsPerReduceOutputByte");
compare1(submitTime, other.submitTime, loc, "submitTime");
compare1(launchTime, other.launchTime, loc, "launchTime");
compare1(finishTime, other.finishTime, loc, "finishTime");
compare1(heapMegabytes, other.heapMegabytes, loc, "heapMegabytes");
compare1(totalMaps, other.totalMaps, loc, "totalMaps");
compare1(totalReduces, other.totalReduces, loc, "totalReduces");
compare1(outcome, other.outcome, loc, "outcome");
compare1(jobtype, other.jobtype, loc, "jobtype");
compare1(priority, other.priority, loc, "priority");
compareStrings(directDependantJobs, other.directDependantJobs, loc,
"directDependantJobs");
compareLoggedTasks(mapTasks, other.mapTasks, loc, "mapTasks");
compareLoggedTasks(reduceTasks, other.reduceTasks, loc, "reduceTasks");
compareLoggedTasks(otherTasks, other.otherTasks, loc, "otherTasks");
compare1(relativeTime, other.relativeTime, loc, "relativeTime");
compareCDFs(successfulMapAttemptCDFs, other.successfulMapAttemptCDFs, loc,
"successfulMapAttemptCDFs");
compareCDFs(failedMapAttemptCDFs, other.failedMapAttemptCDFs, loc,
"failedMapAttemptCDFs");
compare1(successfulReduceAttemptCDF, other.successfulReduceAttemptCDF, loc,
"successfulReduceAttemptCDF", -1);
compare1(failedReduceAttemptCDF, other.failedReduceAttemptCDF, loc,
"failedReduceAttemptCDF", -1);
compare1(mapperTriesToSucceed, other.mapperTriesToSucceed, loc,
"mapperTriesToSucceed");
compare1(failedMapperFraction, other.failedMapperFraction, loc,
"failedMapperFraction");
compare1(queue, other.queue, loc, "queue");
compare1(jobName, other.jobName, loc, "jobName");
compare1(clusterMapMB, other.clusterMapMB, loc, "clusterMapMB");
compare1(clusterReduceMB, other.clusterReduceMB, loc, "clusterReduceMB");
compare1(jobMapMB, other.jobMapMB, loc, "jobMapMB");
compare1(jobReduceMB, other.jobReduceMB, loc, "jobReduceMB");
// compare the job configuration parameters
compareJobProperties(jobProperties, other.getJobProperties(), loc,
"JobProperties");
}
}
| 19,293 | 27.373529 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedTaskAttempt.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import org.apache.hadoop.util.StringUtils;
import org.codehaus.jackson.annotate.JsonAnySetter;
// HACK ALERT!!! This "should" have two subclasses, which might be called
// LoggedMapTaskAttempt and LoggedReduceTaskAttempt, but
// the Jackson implementation of JSON doesn't handle a
// superclass-valued field.
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.jobhistory.JhCounter;
import org.apache.hadoop.mapreduce.jobhistory.JhCounterGroup;
import org.apache.hadoop.mapreduce.jobhistory.JhCounters;
import org.apache.hadoop.tools.rumen.datatypes.NodeName;
/**
 * A {@link LoggedTaskAttempt} represents an attempt to run a Hadoop task in a
 * Hadoop job. Note that a task can have several attempts.
*
* All of the public methods are simply accessors for the instance variables we
* want to write out in the JSON files.
*
*/
public class LoggedTaskAttempt implements DeepCompare {
TaskAttemptID attemptID;
Pre21JobHistoryConstants.Values result;
long startTime = -1L;
long finishTime = -1L;
NodeName hostName;
long hdfsBytesRead = -1L;
long hdfsBytesWritten = -1L;
long fileBytesRead = -1L;
long fileBytesWritten = -1L;
long mapInputRecords = -1L;
long mapInputBytes = -1L;
long mapOutputBytes = -1L;
long mapOutputRecords = -1L;
long combineInputRecords = -1L;
long reduceInputGroups = -1L;
long reduceInputRecords = -1L;
long reduceShuffleBytes = -1L;
long reduceOutputRecords = -1L;
long spilledRecords = -1L;
long shuffleFinished = -1L;
long sortFinished = -1L;
LoggedLocation location;
// Initialize to default object for backward compatibility
ResourceUsageMetrics metrics = new ResourceUsageMetrics();
List<Integer> clockSplits = new ArrayList<Integer>();
List<Integer> cpuUsages = new ArrayList<Integer>();
List<Integer> vMemKbytes = new ArrayList<Integer>();
List<Integer> physMemKbytes = new ArrayList<Integer>();
LoggedTaskAttempt() {
super();
}
// carries the kinds of splits vectors a LoggedTaskAttempt holds.
//
// Each enumeral has the following methods:
// get(LoggedTaskAttempt attempt)
// returns a List<Integer> with the corresponding value field
// set(LoggedTaskAttempt attempt, List<Integer> newValue)
// sets the value
// There is also a pair of methods get(List<List<Integer>>) and
// set(List<List<Integer>>, List<Integer>) which correspondingly
// delivers or sets the appropriate element of the
// List<List<Integer>> .
// This makes it easier to add another kind in the future.
public enum SplitVectorKind {
WALLCLOCK_TIME {
@Override
public List<Integer> get(LoggedTaskAttempt attempt) {
return attempt.getClockSplits();
}
@Override
public void set(LoggedTaskAttempt attempt, List<Integer> newValue) {
attempt.setClockSplits(newValue);
}
},
CPU_USAGE {
@Override
public List<Integer> get(LoggedTaskAttempt attempt) {
return attempt.getCpuUsages();
}
@Override
public void set(LoggedTaskAttempt attempt, List<Integer> newValue) {
attempt.setCpuUsages(newValue);
}
},
VIRTUAL_MEMORY_KBYTES {
@Override
public List<Integer> get(LoggedTaskAttempt attempt) {
return attempt.getVMemKbytes();
}
@Override
public void set(LoggedTaskAttempt attempt, List<Integer> newValue) {
attempt.setVMemKbytes(newValue);
}
},
PHYSICAL_MEMORY_KBYTES {
@Override
public List<Integer> get(LoggedTaskAttempt attempt) {
return attempt.getPhysMemKbytes();
}
@Override
public void set(LoggedTaskAttempt attempt, List<Integer> newValue) {
attempt.setPhysMemKbytes(newValue);
}
};
static private final List<List<Integer>> NULL_SPLITS_VECTOR
= new ArrayList<List<Integer>>();
static {
for (SplitVectorKind kind : SplitVectorKind.values() ) {
NULL_SPLITS_VECTOR.add(new ArrayList<Integer>());
}
}
abstract public List<Integer> get(LoggedTaskAttempt attempt);
abstract public void set(LoggedTaskAttempt attempt, List<Integer> newValue);
public List<Integer> get(List<List<Integer>> listSplits) {
return listSplits.get(this.ordinal());
}
public void set(List<List<Integer>> listSplits, List<Integer> newValue) {
listSplits.set(this.ordinal(), newValue);
}
static public List<List<Integer>> getNullSplitsVector() {
return NULL_SPLITS_VECTOR;
}
}
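  // Illustrative usage (not part of the original source): read a splits
  // vector without hard-coding its position, e.g.
  //   List<Integer> cpu = SplitVectorKind.CPU_USAGE.get(attempt);
  // or, given List<List<Integer>> splits = attempt.allSplitVectors(),
  //   List<Integer> wallclock = SplitVectorKind.WALLCLOCK_TIME.get(splits);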
/**
*
   * @return a list of all splits vectors, ordered in enumeral order
   *         within {@link SplitVectorKind}. Do NOT index into the returned
   *         list with a hard-coded index to get individual values; use
   *         {@code SplitVectorKind.get(LoggedTaskAttempt)} instead.
*/
public List<List<Integer>> allSplitVectors() {
List<List<Integer>> result
= new ArrayList<List<Integer>>(SplitVectorKind.values().length);
for (SplitVectorKind kind : SplitVectorKind.values() ) {
result.add(kind.get(this));
}
return result;
}
static private Set<String> alreadySeenAnySetterAttributes =
new TreeSet<String>();
// for input parameter ignored.
@JsonAnySetter
public void setUnknownAttribute(String attributeName, Object ignored) {
if (!alreadySeenAnySetterAttributes.contains(attributeName)) {
alreadySeenAnySetterAttributes.add(attributeName);
System.err.println("In LoggedJob, we saw the unknown attribute "
+ attributeName + ".");
}
}
public List<Integer> getClockSplits() {
return clockSplits;
}
void setClockSplits(List<Integer> clockSplits) {
this.clockSplits = clockSplits;
}
void arraySetClockSplits(int[] clockSplits) {
List<Integer> result = new ArrayList<Integer>();
for (int i = 0; i < clockSplits.length; ++i) {
result.add(clockSplits[i]);
}
this.clockSplits = result;
}
public List<Integer> getCpuUsages() {
return cpuUsages;
}
void setCpuUsages(List<Integer> cpuUsages) {
this.cpuUsages = cpuUsages;
}
void arraySetCpuUsages(int[] cpuUsages) {
List<Integer> result = new ArrayList<Integer>();
for (int i = 0; i < cpuUsages.length; ++i) {
result.add(cpuUsages[i]);
}
this.cpuUsages = result;
}
public List<Integer> getVMemKbytes() {
return vMemKbytes;
}
void setVMemKbytes(List<Integer> vMemKbytes) {
this.vMemKbytes = vMemKbytes;
}
void arraySetVMemKbytes(int[] vMemKbytes) {
List<Integer> result = new ArrayList<Integer>();
for (int i = 0; i < vMemKbytes.length; ++i) {
result.add(vMemKbytes[i]);
}
this.vMemKbytes = result;
}
public List<Integer> getPhysMemKbytes() {
return physMemKbytes;
}
void setPhysMemKbytes(List<Integer> physMemKbytes) {
this.physMemKbytes = physMemKbytes;
}
void arraySetPhysMemKbytes(int[] physMemKbytes) {
List<Integer> result = new ArrayList<Integer>();
for (int i = 0; i < physMemKbytes.length; ++i) {
result.add(physMemKbytes[i]);
}
this.physMemKbytes = result;
}
void adjustTimes(long adjustment) {
startTime += adjustment;
finishTime += adjustment;
// For reduce attempts, adjust the different phases' finish times also
if (sortFinished >= 0) {
shuffleFinished += adjustment;
sortFinished += adjustment;
}
}
public long getShuffleFinished() {
return shuffleFinished;
}
void setShuffleFinished(long shuffleFinished) {
this.shuffleFinished = shuffleFinished;
}
public long getSortFinished() {
return sortFinished;
}
void setSortFinished(long sortFinished) {
this.sortFinished = sortFinished;
}
public TaskAttemptID getAttemptID() {
return attemptID;
}
void setAttemptID(String attemptID) {
this.attemptID = TaskAttemptID.forName(attemptID);
}
public Pre21JobHistoryConstants.Values getResult() {
return result;
}
void setResult(Pre21JobHistoryConstants.Values result) {
this.result = result;
}
public long getStartTime() {
return startTime;
}
void setStartTime(long startTime) {
this.startTime = startTime;
}
public long getFinishTime() {
return finishTime;
}
void setFinishTime(long finishTime) {
this.finishTime = finishTime;
}
public NodeName getHostName() {
return hostName;
}
// This is needed for JSON deserialization
void setHostName(String hostName) {
this.hostName = hostName == null ? null : new NodeName(hostName);
}
// In job-history, hostName is saved in the format rackName/NodeName
//TODO this is a hack! The '/' handling needs fixing.
void setHostName(String hostName, String rackName) {
if (hostName == null || hostName.length() == 0) {
throw new RuntimeException("Invalid entry! Missing hostname");
} else if (rackName == null || rackName.length() == 0) {
setHostName(hostName);
} else {
// make sure that the rackname is prefixed with a '/'
if (!rackName.startsWith("/")) {
rackName = "/" + rackName;
}
// make sure that the hostname is prefixed with a '/'
if (!hostName.startsWith("/")) {
hostName = "/" + hostName;
}
setHostName(rackName.intern() + hostName.intern());
}
}
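  // Illustrative example (not part of the original source): hypothetical
  // inputs hostName="node-123" and rackName="rack-1" are normalized and
  // stored as "/rack-1/node-123"; with a null or empty rackName the host
  // name is stored as given.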
public long getHdfsBytesRead() {
return hdfsBytesRead;
}
void setHdfsBytesRead(long hdfsBytesRead) {
this.hdfsBytesRead = hdfsBytesRead;
}
public long getHdfsBytesWritten() {
return hdfsBytesWritten;
}
void setHdfsBytesWritten(long hdfsBytesWritten) {
this.hdfsBytesWritten = hdfsBytesWritten;
}
public long getFileBytesRead() {
return fileBytesRead;
}
void setFileBytesRead(long fileBytesRead) {
this.fileBytesRead = fileBytesRead;
}
public long getFileBytesWritten() {
return fileBytesWritten;
}
void setFileBytesWritten(long fileBytesWritten) {
this.fileBytesWritten = fileBytesWritten;
}
public long getMapInputRecords() {
return mapInputRecords;
}
void setMapInputRecords(long mapInputRecords) {
this.mapInputRecords = mapInputRecords;
}
public long getMapOutputBytes() {
return mapOutputBytes;
}
void setMapOutputBytes(long mapOutputBytes) {
this.mapOutputBytes = mapOutputBytes;
}
public long getMapOutputRecords() {
return mapOutputRecords;
}
void setMapOutputRecords(long mapOutputRecords) {
this.mapOutputRecords = mapOutputRecords;
}
public long getCombineInputRecords() {
return combineInputRecords;
}
void setCombineInputRecords(long combineInputRecords) {
this.combineInputRecords = combineInputRecords;
}
public long getReduceInputGroups() {
return reduceInputGroups;
}
void setReduceInputGroups(long reduceInputGroups) {
this.reduceInputGroups = reduceInputGroups;
}
public long getReduceInputRecords() {
return reduceInputRecords;
}
void setReduceInputRecords(long reduceInputRecords) {
this.reduceInputRecords = reduceInputRecords;
}
public long getReduceShuffleBytes() {
return reduceShuffleBytes;
}
void setReduceShuffleBytes(long reduceShuffleBytes) {
this.reduceShuffleBytes = reduceShuffleBytes;
}
public long getReduceOutputRecords() {
return reduceOutputRecords;
}
void setReduceOutputRecords(long reduceOutputRecords) {
this.reduceOutputRecords = reduceOutputRecords;
}
public long getSpilledRecords() {
return spilledRecords;
}
void setSpilledRecords(long spilledRecords) {
this.spilledRecords = spilledRecords;
}
public LoggedLocation getLocation() {
return location;
}
void setLocation(LoggedLocation location) {
this.location = location;
}
public long getMapInputBytes() {
return mapInputBytes;
}
void setMapInputBytes(long mapInputBytes) {
this.mapInputBytes = mapInputBytes;
}
// incorporate event counters
public void incorporateCounters(JhCounters counters) {
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
attempt.hdfsBytesRead = val;
}
}, counters, "HDFS_BYTES_READ");
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
attempt.hdfsBytesWritten = val;
}
}, counters, "HDFS_BYTES_WRITTEN");
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
attempt.fileBytesRead = val;
}
}, counters, "FILE_BYTES_READ");
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
attempt.fileBytesWritten = val;
}
}, counters, "FILE_BYTES_WRITTEN");
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
attempt.mapInputBytes = val;
}
}, counters, "MAP_INPUT_BYTES");
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
attempt.mapInputRecords = val;
}
}, counters, "MAP_INPUT_RECORDS");
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
attempt.mapOutputBytes = val;
}
}, counters, "MAP_OUTPUT_BYTES");
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
attempt.mapOutputRecords = val;
}
}, counters, "MAP_OUTPUT_RECORDS");
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
attempt.combineInputRecords = val;
}
}, counters, "COMBINE_INPUT_RECORDS");
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
attempt.reduceInputGroups = val;
}
}, counters, "REDUCE_INPUT_GROUPS");
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
attempt.reduceInputRecords = val;
}
}, counters, "REDUCE_INPUT_RECORDS");
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
attempt.reduceShuffleBytes = val;
}
}, counters, "REDUCE_SHUFFLE_BYTES");
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
attempt.reduceOutputRecords = val;
}
}, counters, "REDUCE_OUTPUT_RECORDS");
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
attempt.spilledRecords = val;
}
}, counters, "SPILLED_RECORDS");
// incorporate CPU usage
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
metrics.setCumulativeCpuUsage(val);
}
}, counters, "CPU_MILLISECONDS");
// incorporate virtual memory usage
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
metrics.setVirtualMemoryUsage(val);
}
}, counters, "VIRTUAL_MEMORY_BYTES");
// incorporate physical memory usage
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
metrics.setPhysicalMemoryUsage(val);
}
}, counters, "PHYSICAL_MEMORY_BYTES");
// incorporate heap usage
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
metrics.setHeapUsage(val);
}
}, counters, "COMMITTED_HEAP_BYTES");
}
// Get the resource usage metrics
public ResourceUsageMetrics getResourceUsageMetrics() {
return metrics;
}
// Set the resource usage metrics
void setResourceUsageMetrics(ResourceUsageMetrics metrics) {
this.metrics = metrics;
}
private static String canonicalizeCounterName(String nonCanonicalName) {
String result = StringUtils.toLowerCase(nonCanonicalName);
result = result.replace(' ', '|');
result = result.replace('-', '|');
result = result.replace('_', '|');
result = result.replace('.', '|');
return result;
}
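  // Illustrative example (not part of the original source):
  //   canonicalizeCounterName("HDFS_BYTES_READ") returns "hdfs|bytes|read",
  // so counter lookups are insensitive to case and to the ' ', '-', '_' and
  // '.' separator characters.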
private abstract class SetField {
LoggedTaskAttempt attempt;
SetField(LoggedTaskAttempt attempt) {
this.attempt = attempt;
}
abstract void set(long value);
}
private static void incorporateCounter(SetField thunk, JhCounters counters,
String counterName) {
counterName = canonicalizeCounterName(counterName);
for (JhCounterGroup group : counters.groups) {
for (JhCounter counter : group.counts) {
if (counterName
.equals(canonicalizeCounterName(counter.name.toString()))) {
thunk.set(counter.value);
return;
}
}
}
}
private void compare1(String c1, String c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
if (c1 == null || c2 == null || !c1.equals(c2)) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
}
private void compare1(NodeName c1, NodeName c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
if (c1 == null || c2 == null) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
compare1(c1.getValue(), c2.getValue(), new TreePath(loc, eltname), "value");
}
private void compare1(long c1, long c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 != c2) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
}
private void compare1(Pre21JobHistoryConstants.Values c1,
Pre21JobHistoryConstants.Values c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 != c2) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
}
private void compare1(LoggedLocation c1, LoggedLocation c2, TreePath loc,
String eltname) throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
TreePath recurse = new TreePath(loc, eltname);
if (c1 == null || c2 == null) {
throw new DeepInequalityException(eltname + " miscompared", recurse);
}
c1.deepCompare(c2, recurse);
}
private void compare1(List<Integer> c1, List<Integer> c2, TreePath loc,
String eltname)
throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
if (c1 == null || c2 == null || c1.size() != c2.size()) {
throw new DeepInequalityException
(eltname + " miscompared", new TreePath(loc, eltname));
}
for (int i = 0; i < c1.size(); ++i) {
if (!c1.get(i).equals(c2.get(i))) {
throw new DeepInequalityException("" + c1.get(i) + " != " + c2.get(i),
new TreePath(loc, eltname, i));
}
}
}
public void deepCompare(DeepCompare comparand, TreePath loc)
throws DeepInequalityException {
if (!(comparand instanceof LoggedTaskAttempt)) {
throw new DeepInequalityException("comparand has wrong type", loc);
}
LoggedTaskAttempt other = (LoggedTaskAttempt) comparand;
compare1(attemptID.toString(), other.attemptID.toString(), loc, "attemptID");
compare1(result, other.result, loc, "result");
compare1(startTime, other.startTime, loc, "startTime");
compare1(finishTime, other.finishTime, loc, "finishTime");
compare1(hostName, other.hostName, loc, "hostName");
compare1(hdfsBytesRead, other.hdfsBytesRead, loc, "hdfsBytesRead");
compare1(hdfsBytesWritten, other.hdfsBytesWritten, loc, "hdfsBytesWritten");
compare1(fileBytesRead, other.fileBytesRead, loc, "fileBytesRead");
compare1(fileBytesWritten, other.fileBytesWritten, loc, "fileBytesWritten");
compare1(mapInputBytes, other.mapInputBytes, loc, "mapInputBytes");
compare1(mapInputRecords, other.mapInputRecords, loc, "mapInputRecords");
compare1(mapOutputBytes, other.mapOutputBytes, loc, "mapOutputBytes");
compare1(mapOutputRecords, other.mapOutputRecords, loc, "mapOutputRecords");
compare1(combineInputRecords, other.combineInputRecords, loc,
"combineInputRecords");
compare1(reduceInputGroups, other.reduceInputGroups, loc,
"reduceInputGroups");
compare1(reduceInputRecords, other.reduceInputRecords, loc,
"reduceInputRecords");
compare1(reduceShuffleBytes, other.reduceShuffleBytes, loc,
"reduceShuffleBytes");
compare1(reduceOutputRecords, other.reduceOutputRecords, loc,
"reduceOutputRecords");
compare1(spilledRecords, other.spilledRecords, loc, "spilledRecords");
compare1(shuffleFinished, other.shuffleFinished, loc, "shuffleFinished");
compare1(sortFinished, other.sortFinished, loc, "sortFinished");
compare1(location, other.location, loc, "location");
compare1(clockSplits, other.clockSplits, loc, "clockSplits");
compare1(cpuUsages, other.cpuUsages, loc, "cpuUsages");
compare1(vMemKbytes, other.vMemKbytes, loc, "vMemKbytes");
compare1(physMemKbytes, other.physMemKbytes, loc, "physMemKbytes");
}
}
| 22,173 | 27.722798 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ParsedTaskAttempt.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapreduce.jobhistory.JhCounters;
/**
 * This is a wrapper class around {@link LoggedTaskAttempt}. It also provides
 * extra information about the task attempt, obtained from job history, that
 * is not written to the JSON trace file.
*/
public class ParsedTaskAttempt extends LoggedTaskAttempt {
private static final Log LOG = LogFactory.getLog(ParsedTaskAttempt.class);
private String diagnosticInfo;
private String trackerName;
private Integer httpPort, shufflePort;
private Map<String, Long> countersMap = new HashMap<String, Long>();
ParsedTaskAttempt() {
super();
}
/** incorporate event counters */
public void incorporateCounters(JhCounters counters) {
Map<String, Long> countersMap =
JobHistoryUtils.extractCounters(counters);
putCounters(countersMap);
super.incorporateCounters(counters);
}
/** Set the task attempt counters */
public void putCounters(Map<String, Long> counters) {
this.countersMap = counters;
}
/**
* @return the task attempt counters
*/
public Map<String, Long> obtainCounters() {
return countersMap;
}
/** Set the task attempt diagnostic-info */
public void putDiagnosticInfo(String msg) {
diagnosticInfo = msg;
}
/**
* @return the diagnostic-info of this task attempt.
* If the attempt is successful, returns null.
*/
public String obtainDiagnosticInfo() {
return diagnosticInfo;
}
void putTrackerName(String trackerName) {
this.trackerName = trackerName;
}
public String obtainTrackerName() {
return trackerName;
}
void putHttpPort(int port) {
httpPort = port;
}
/**
* @return http port if set. Returns null otherwise.
*/
public Integer obtainHttpPort() {
return httpPort;
}
void putShufflePort(int port) {
shufflePort = port;
}
/**
* @return shuffle port if set. Returns null otherwise.
*/
public Integer obtainShufflePort() {
return shufflePort;
}
/** Dump the extra info of ParsedTaskAttempt */
void dumpParsedTaskAttempt() {
LOG.info("ParsedTaskAttempt details:" + obtainCounters()
+ ";DiagnosticInfo=" + obtainDiagnosticInfo() + "\n"
+ obtainTrackerName() + ";" + obtainHttpPort() + ";"
+ obtainShufflePort() + ";rack=" + getHostName().getRackName()
+ ";host=" + getHostName().getHostName());
}
}
| 3,375 | 27.133333 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/InputDemuxer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
/**
 * {@link InputDemuxer} demultiplexes the input files into individual input
* streams.
*/
public interface InputDemuxer extends Closeable {
/**
* Bind the {@link InputDemuxer} to a particular file.
*
* @param path
* The path to the file it should bind to.
* @param conf
* Configuration
   * @throws IOException
   *           if a read error occurs. IOException is reserved for read
   *           errors; a file that can be read but is in the wrong format
   *           should not cause one.
*/
public void bindTo(Path path, Configuration conf) throws IOException;
/**
* Get the next <name, input> pair. The name should preserve the original job
* history file or job conf file name. The input object should be closed
* before calling getNext() again. The old input object would be invalid after
* calling getNext() again.
*
* @return the next <name, input> pair.
*/
public Pair<String, InputStream> getNext() throws IOException;
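  // Minimal usage sketch (illustrative, not part of the original source),
  // assuming the implementation returns null from getNext() when exhausted:
  //   InputDemuxer demuxer = ...;   // some concrete implementation
  //   demuxer.bindTo(path, conf);
  //   Pair<String, InputStream> pair;
  //   while ((pair = demuxer.getNext()) != null) {
  //     try {
  //       // read pair.second(), which is named by pair.first()
  //     } finally {
  //       pair.second().close();    // close before the next getNext() call
  //     }
  //   }
  //   demuxer.close();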
}
| 2,050 | 34.982456 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ParsedJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package org.apache.hadoop.tools.rumen;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.security.authorize.AccessControlList;
/**
 * This is a wrapper class around {@link LoggedJob}. It also provides the
 * extra information about the job, obtained from job history, that is not
 * written to the JSON trace file.
*/
public class ParsedJob extends LoggedJob {
private static final Log LOG = LogFactory.getLog(ParsedJob.class);
private Map<String, Long> totalCountersMap = new HashMap<String, Long>();
private Map<String, Long> mapCountersMap = new HashMap<String, Long>();
private Map<String, Long> reduceCountersMap = new HashMap<String, Long>();
private String jobConfPath;
private Map<JobACL, AccessControlList> jobAcls;
ParsedJob() {
}
ParsedJob(String jobID) {
super();
setJobID(jobID);
}
/** Set the job total counters */
void putTotalCounters(Map<String, Long> totalCounters) {
this.totalCountersMap = totalCounters;
}
/**
* @return the job total counters
*/
public Map<String, Long> obtainTotalCounters() {
return totalCountersMap;
}
/** Set the job level map tasks' counters */
void putMapCounters(Map<String, Long> mapCounters) {
this.mapCountersMap = mapCounters;
}
/**
* @return the job level map tasks' counters
*/
public Map<String, Long> obtainMapCounters() {
return mapCountersMap;
}
/** Set the job level reduce tasks' counters */
void putReduceCounters(Map<String, Long> reduceCounters) {
this.reduceCountersMap = reduceCounters;
}
/**
* @return the job level reduce tasks' counters
*/
public Map<String, Long> obtainReduceCounters() {
return reduceCountersMap;
}
/** Set the job conf path in staging dir on hdfs */
void putJobConfPath(String confPath) {
jobConfPath = confPath;
}
/**
* @return the job conf path in staging dir on hdfs
*/
public String obtainJobConfpath() {
return jobConfPath;
}
/** Set the job acls */
void putJobAcls(Map<JobACL, AccessControlList> acls) {
jobAcls = acls;
}
/**
* @return the job acls
*/
public Map<JobACL, AccessControlList> obtainJobAcls() {
return jobAcls;
}
/**
* @return the list of map tasks of this job
*/
public List<ParsedTask> obtainMapTasks() {
List<LoggedTask> tasks = super.getMapTasks();
return convertTasks(tasks);
}
/**
* @return the list of reduce tasks of this job
*/
public List<ParsedTask> obtainReduceTasks() {
List<LoggedTask> tasks = super.getReduceTasks();
return convertTasks(tasks);
}
/**
* @return the list of other tasks of this job
*/
public List<ParsedTask> obtainOtherTasks() {
List<LoggedTask> tasks = super.getOtherTasks();
return convertTasks(tasks);
}
/** As we know that this list of {@link LoggedTask} objects is actually a list
* of {@link ParsedTask} objects, we go ahead and cast them.
* @return the list of {@link ParsedTask} objects
*/
private List<ParsedTask> convertTasks(List<LoggedTask> tasks) {
List<ParsedTask> result = new ArrayList<ParsedTask>();
for (LoggedTask t : tasks) {
if (t instanceof ParsedTask) {
result.add((ParsedTask)t);
} else {
throw new RuntimeException("Unexpected type of tasks in the list...");
}
}
return result;
}
/** Dump the extra info of ParsedJob */
void dumpParsedJob() {
LOG.info("ParsedJob details:" + obtainTotalCounters() + ";"
+ obtainMapCounters() + ";" + obtainReduceCounters()
+ "\n" + obtainJobConfpath() + "\n" + obtainJobAcls()
+ ";Q=" + (getQueue() == null ? "null" : getQueue().getValue()));
List<ParsedTask> maps = obtainMapTasks();
for (ParsedTask task : maps) {
task.dumpParsedTask();
}
List<ParsedTask> reduces = obtainReduceTasks();
for (ParsedTask task : reduces) {
task.dumpParsedTask();
}
List<ParsedTask> others = obtainOtherTasks();
for (ParsedTask task : others) {
task.dumpParsedTask();
}
}
}
| 5,069 | 27.166667 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Folder.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.Closeable;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Comparator;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.PriorityQueue;
import java.util.Queue;
import java.util.Random;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class Folder extends Configured implements Tool {
private long outputDuration = -1;
private long inputCycle = -1;
private double concentration = 1.0;
private long randomSeed = 0; // irrelevant if seeded == false
private boolean seeded = false;
private boolean debug = false;
private boolean allowMissorting = false;
private int skewBufferLength = 0;
private long startsAfter = -1;
static final private Log LOG = LogFactory.getLog(Folder.class);
private DeskewedJobTraceReader reader = null;
private Outputter<LoggedJob> outGen = null;
private List<Path> tempPaths = new LinkedList<Path>();
private Path tempDir = null;
private long firstJobSubmitTime;
private double timeDilation;
private double transcriptionRateFraction;
private int transcriptionRateInteger;
private Random random;
static private final long TICKS_PER_SECOND = 1000L;
// error return codes
static private final int NON_EXISTENT_FILES = 1;
static private final int NO_INPUT_CYCLE_LENGTH = 2;
static private final int EMPTY_JOB_TRACE = 3;
static private final int OUT_OF_ORDER_JOBS = 4;
static private final int ALL_JOBS_SIMULTANEOUS = 5;
static private final int IO_ERROR = 6;
static private final int OTHER_ERROR = 7;
private Set<Closeable> closees = new HashSet<Closeable>();
private Set<Path> deletees = new HashSet<Path>();
static long parseDuration(String durationString) {
String numeral = durationString.substring(0, durationString.length() - 1);
char durationCode = durationString.charAt(durationString.length() - 1);
long result = Integer.parseInt(numeral);
    if (result <= 0) {
      throw new IllegalArgumentException("Non-positive durations are not allowed");
    }
switch (durationCode) {
case 'D':
case 'd':
return 24L * 60L * 60L * TICKS_PER_SECOND * result;
case 'H':
case 'h':
return 60L * 60L * TICKS_PER_SECOND * result;
case 'M':
case 'm':
return 60L * TICKS_PER_SECOND * result;
case 'S':
case 's':
return TICKS_PER_SECOND * result;
default:
throw new IllegalArgumentException("Missing or invalid duration code");
}
}
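  // Worked examples (illustrative, not part of the original source), using
  // TICKS_PER_SECOND == 1000:
  //   parseDuration("30s") == 30000L     // 30 * 1000
  //   parseDuration("2h")  == 7200000L   // 2 * 60 * 60 * 1000
  //   parseDuration("1d")  == 86400000L  // 24 * 60 * 60 * 1000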
private int initialize(String[] args) throws IllegalArgumentException {
String tempDirName = null;
String inputPathName = null;
String outputPathName = null;
for (int i = 0; i < args.length; ++i) {
String thisArg = args[i];
if (thisArg.equalsIgnoreCase("-starts-after")) {
startsAfter = parseDuration(args[++i]);
} else if (thisArg.equalsIgnoreCase("-output-duration")) {
outputDuration = parseDuration(args[++i]);
} else if (thisArg.equalsIgnoreCase("-input-cycle")) {
inputCycle = parseDuration(args[++i]);
} else if (thisArg.equalsIgnoreCase("-concentration")) {
concentration = Double.parseDouble(args[++i]);
} else if (thisArg.equalsIgnoreCase("-debug")) {
debug = true;
} else if (thisArg.equalsIgnoreCase("-allow-missorting")) {
allowMissorting = true;
} else if (thisArg.equalsIgnoreCase("-seed")) {
seeded = true;
randomSeed = Long.parseLong(args[++i]);
} else if (thisArg.equalsIgnoreCase("-skew-buffer-length")) {
skewBufferLength = Integer.parseInt(args[++i]);
} else if (thisArg.equalsIgnoreCase("-temp-directory")) {
tempDirName = args[++i];
} else if (thisArg.equals("") || thisArg.startsWith("-")) {
throw new IllegalArgumentException("Illegal switch argument, "
+ thisArg + " at position " + i);
} else {
inputPathName = thisArg;
outputPathName = args[++i];
if (i != args.length - 1) {
throw new IllegalArgumentException("Too many non-switch arguments");
}
}
}
try {
Configuration conf = getConf();
Path inPath = new Path(inputPathName);
reader =
new DeskewedJobTraceReader(new JobTraceReader(inPath, conf),
skewBufferLength, !allowMissorting);
Path outPath = new Path(outputPathName);
outGen = new DefaultOutputter<LoggedJob>();
outGen.init(outPath, conf);
tempDir =
tempDirName == null ? outPath.getParent() : new Path(tempDirName);
FileSystem fs = tempDir.getFileSystem(getConf());
if (!fs.getFileStatus(tempDir).isDirectory()) {
throw new IOException("Your temp directory is not a directory");
}
if (inputCycle <= 0) {
LOG.error("You must have an input cycle length.");
return NO_INPUT_CYCLE_LENGTH;
}
if (outputDuration <= 0) {
outputDuration = 60L * 60L * TICKS_PER_SECOND;
}
if (inputCycle <= 0) {
inputCycle = outputDuration;
}
timeDilation = (double) outputDuration / (double) inputCycle;
random = seeded ? new Random(randomSeed) : new Random();
if (debug) {
randomSeed = random.nextLong();
LOG.warn("This run effectively has a -seed of " + randomSeed);
random = new Random(randomSeed);
seeded = true;
}
} catch (IOException e) {
e.printStackTrace(System.err);
return NON_EXISTENT_FILES;
}
return 0;
}
@Override
public int run(String[] args) throws IOException {
int result = initialize(args);
if (result != 0) {
return result;
}
return run();
}
public int run() throws IOException {
class JobEntryComparator implements
Comparator<Pair<LoggedJob, JobTraceReader>> {
public int compare(Pair<LoggedJob, JobTraceReader> p1,
Pair<LoggedJob, JobTraceReader> p2) {
LoggedJob j1 = p1.first();
LoggedJob j2 = p2.first();
return (j1.getSubmitTime() < j2.getSubmitTime()) ? -1 : (j1
.getSubmitTime() == j2.getSubmitTime()) ? 0 : 1;
}
}
    // We initialize an empty heap so that, if an error occurs before the real
    // one is established, the finally code still goes through.
Queue<Pair<LoggedJob, JobTraceReader>> heap =
new PriorityQueue<Pair<LoggedJob, JobTraceReader>>();
try {
LoggedJob job = reader.nextJob();
if (job == null) {
LOG.error("The job trace is empty");
return EMPTY_JOB_TRACE;
}
      // If a starts-after time is specified, skip jobs until we reach the
      // starting time limit.
if (startsAfter > 0) {
LOG.info("starts-after time is specified. Initial job submit time : "
+ job.getSubmitTime());
long approximateTime = job.getSubmitTime() + startsAfter;
job = reader.nextJob();
long skippedCount = 0;
while (job != null && job.getSubmitTime() < approximateTime) {
job = reader.nextJob();
skippedCount++;
}
LOG.debug("Considering jobs with submit time greater than "
+ startsAfter + " ms. Skipped " + skippedCount + " jobs.");
if (job == null) {
LOG.error("No more jobs to process in the trace with 'starts-after'"+
" set to " + startsAfter + "ms.");
return EMPTY_JOB_TRACE;
}
LOG.info("The first job has a submit time of " + job.getSubmitTime());
}
firstJobSubmitTime = job.getSubmitTime();
long lastJobSubmitTime = firstJobSubmitTime;
int numberJobs = 0;
long currentIntervalEnd = Long.MIN_VALUE;
Path nextSegment = null;
Outputter<LoggedJob> tempGen = null;
if (debug) {
LOG.debug("The first job has a submit time of " + firstJobSubmitTime);
}
final Configuration conf = getConf();
try {
// At the top of this loop, skewBuffer has at most
// skewBufferLength entries.
while (job != null) {
final Random tempNameGenerator = new Random();
lastJobSubmitTime = job.getSubmitTime();
++numberJobs;
if (job.getSubmitTime() >= currentIntervalEnd) {
if (tempGen != null) {
tempGen.close();
}
nextSegment = null;
for (int i = 0; i < 3 && nextSegment == null; ++i) {
try {
nextSegment =
new Path(tempDir, "segment-" + tempNameGenerator.nextLong()
+ ".json.gz");
if (debug) {
LOG.debug("The next segment name is " + nextSegment);
}
FileSystem fs = nextSegment.getFileSystem(conf);
try {
if (!fs.exists(nextSegment)) {
break;
}
continue;
} catch (IOException e) {
// no code -- file did not already exist
}
} catch (IOException e) {
// no code -- file exists now, or directory bad. We try three
// times.
}
}
if (nextSegment == null) {
throw new RuntimeException("Failed to create a new file!");
}
if (debug) {
LOG.debug("Creating " + nextSegment
+ " for a job with a submit time of " + job.getSubmitTime());
}
deletees.add(nextSegment);
tempPaths.add(nextSegment);
tempGen = new DefaultOutputter<LoggedJob>();
tempGen.init(nextSegment, conf);
long currentIntervalNumber =
(job.getSubmitTime() - firstJobSubmitTime) / inputCycle;
currentIntervalEnd =
firstJobSubmitTime + ((currentIntervalNumber + 1) * inputCycle);
}
          // the temp files contain unadjusted times, but each temp file's
          // content is in the same input cycle interval.
if (tempGen != null) {
tempGen.output(job);
}
job = reader.nextJob();
}
} catch (DeskewedJobTraceReader.OutOfOrderException e) {
return OUT_OF_ORDER_JOBS;
} finally {
if (tempGen != null) {
tempGen.close();
}
}
if (lastJobSubmitTime <= firstJobSubmitTime) {
LOG.error("All of your job[s] have the same submit time."
+ " Please just use your input file.");
return ALL_JOBS_SIMULTANEOUS;
}
double submitTimeSpan = lastJobSubmitTime - firstJobSubmitTime;
LOG.warn("Your input trace spans "
+ (lastJobSubmitTime - firstJobSubmitTime) + " ticks.");
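      // How many output cycles' worth of jobs are being folded together:
      // roughly the submission span divided by the input cycle length, with
      // an (n+1)/n correction for the fencepost at the end of the span.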
double foldingRatio =
submitTimeSpan * (numberJobs + 1) / numberJobs / inputCycle;
if (debug) {
LOG.warn("run: submitTimeSpan = " + submitTimeSpan + ", numberJobs = "
+ numberJobs + ", inputCycle = " + inputCycle);
}
if (reader.neededSkewBufferSize() > 0) {
LOG.warn("You needed a -skew-buffer-length of "
+ reader.neededSkewBufferSize() + " but no more, for this input.");
}
double tProbability = timeDilation * concentration / foldingRatio;
if (debug) {
LOG.warn("run: timeDilation = " + timeDilation + ", concentration = "
+ concentration + ", foldingRatio = " + foldingRatio);
LOG.warn("The transcription probability is " + tProbability);
}
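      // Split the expected number of output copies per job into a guaranteed
      // integer part and a fractional part that is applied probabilistically
      // in maybeOutput().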
transcriptionRateInteger = (int) Math.floor(tProbability);
transcriptionRateFraction = tProbability - Math.floor(tProbability);
// Now read all the inputs in parallel
heap =
new PriorityQueue<Pair<LoggedJob, JobTraceReader>>(tempPaths.size(),
new JobEntryComparator());
for (Path tempPath : tempPaths) {
JobTraceReader thisReader = new JobTraceReader(tempPath, conf);
closees.add(thisReader);
LoggedJob streamFirstJob = thisReader.getNext();
long thisIndex =
(streamFirstJob.getSubmitTime() - firstJobSubmitTime) / inputCycle;
if (debug) {
LOG.debug("A job with submit time of "
+ streamFirstJob.getSubmitTime() + " is in interval # "
+ thisIndex);
}
adjustJobTimes(streamFirstJob);
if (debug) {
LOG.debug("That job's submit time is adjusted to "
+ streamFirstJob.getSubmitTime());
}
heap
.add(new Pair<LoggedJob, JobTraceReader>(streamFirstJob, thisReader));
}
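      // Merge the per-interval temp streams: repeatedly emit the job with the
      // smallest adjusted submit time and refill the heap from that stream.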
Pair<LoggedJob, JobTraceReader> next = heap.poll();
while (next != null) {
maybeOutput(next.first());
if (debug) {
LOG.debug("The most recent job has an adjusted submit time of "
+ next.first().getSubmitTime());
LOG.debug(" Its replacement in the heap will come from input engine "
+ next.second());
}
LoggedJob replacement = next.second().getNext();
if (replacement == null) {
next.second().close();
if (debug) {
LOG.debug("That input engine is depleted.");
}
} else {
adjustJobTimes(replacement);
if (debug) {
LOG.debug("The replacement has an adjusted submit time of "
+ replacement.getSubmitTime());
}
heap.add(new Pair<LoggedJob, JobTraceReader>(replacement, next
.second()));
}
next = heap.poll();
}
} finally {
IOUtils.cleanup(null, reader);
if (outGen != null) {
outGen.close();
}
for (Pair<LoggedJob, JobTraceReader> heapEntry : heap) {
heapEntry.second().close();
}
for (Closeable closee : closees) {
closee.close();
}
if (!debug) {
Configuration conf = getConf();
for (Path deletee : deletees) {
FileSystem fs = deletee.getFileSystem(conf);
try {
fs.delete(deletee, false);
} catch (IOException e) {
// no code
}
}
}
}
return 0;
}
private void maybeOutput(LoggedJob job) throws IOException {
for (int i = 0; i < transcriptionRateInteger; ++i) {
outGen.output(job);
}
if (random.nextDouble() < transcriptionRateFraction) {
outGen.output(job);
}
}
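  // Fold a job into the first input cycle: keep its offset within the cycle,
  // scale that offset by timeDilation, and shift all of the job's times by
  // the resulting delta.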
private void adjustJobTimes(LoggedJob adjustee) {
long offsetInCycle =
(adjustee.getSubmitTime() - firstJobSubmitTime) % inputCycle;
long outputOffset = (long) ((double) offsetInCycle * timeDilation);
long adjustment =
firstJobSubmitTime + outputOffset - adjustee.getSubmitTime();
adjustee.adjustTimes(adjustment);
}
/**
* @param args
*/
public static void main(String[] args) {
Folder instance = new Folder();
int result = 0;
try {
result = ToolRunner.run(instance, args);
} catch (IOException e) {
e.printStackTrace(System.err);
System.exit(IO_ERROR);
} catch (Exception e) {
e.printStackTrace(System.err);
System.exit(OTHER_ERROR);
}
if (result != 0) {
System.exit(result);
}
return;
}
}
| 16,583 | 29.262774 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobBuilder.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.StringTokenizer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.TaskStatus;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.jobhistory.AMStartedEvent;
import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobFinished;
import org.apache.hadoop.mapreduce.jobhistory.JobFinishedEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobInfoChangeEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobInitedEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobPriorityChangeEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobStatusChangedEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobSubmittedEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobQueueChangeEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobUnsuccessfulCompletionEvent;
import org.apache.hadoop.mapreduce.jobhistory.MapAttemptFinished;
import org.apache.hadoop.mapreduce.jobhistory.MapAttemptFinishedEvent;
import org.apache.hadoop.mapreduce.jobhistory.NormalizedResourceEvent;
import org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished;
import org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinishedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptFinished;
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptFinishedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptStartedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletion;
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletionEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskFailed;
import org.apache.hadoop.mapreduce.jobhistory.TaskFailedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskFinished;
import org.apache.hadoop.mapreduce.jobhistory.TaskFinishedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskStartedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskUpdatedEvent;
import org.apache.hadoop.tools.rumen.Pre21JobHistoryConstants.Values;
import org.apache.hadoop.util.StringUtils;
/**
* {@link JobBuilder} builds one job. It processes a sequence of
* {@link HistoryEvent}s.
*/
public class JobBuilder {
private static final long BYTES_IN_MEG =
StringUtils.TraditionalBinaryPrefix.string2long("1m");
static final private Log LOG = LogFactory.getLog(JobBuilder.class);
private String jobID;
private boolean finalized = false;
private ParsedJob result = new ParsedJob();
private Map<String, ParsedTask> mapTasks = new HashMap<String, ParsedTask>();
private Map<String, ParsedTask> reduceTasks =
new HashMap<String, ParsedTask>();
private Map<String, ParsedTask> otherTasks =
new HashMap<String, ParsedTask>();
private Map<String, ParsedTaskAttempt> attempts =
new HashMap<String, ParsedTaskAttempt>();
private Map<ParsedHost, ParsedHost> allHosts =
new HashMap<ParsedHost, ParsedHost>();
private org.apache.hadoop.mapreduce.jobhistory.JhCounters EMPTY_COUNTERS =
new org.apache.hadoop.mapreduce.jobhistory.JhCounters();
/**
* The number of splits a task can have, before we ignore them all.
*/
private final static int MAXIMUM_PREFERRED_LOCATIONS = 25;
private int[] attemptTimesPercentiles = null;
  // Use this to search within the java options to get heap sizes.
  // The heap size (the number together with its order-of-magnitude suffix)
  // is in Capturing Group 1.
private static final Pattern heapPattern =
Pattern.compile("-Xmx([0-9]+[kKmMgGtT])");
private Properties jobConfigurationParameters = null;
public JobBuilder(String jobID) {
this.jobID = jobID;
}
public String getJobID() {
return jobID;
}
{
if (attemptTimesPercentiles == null) {
attemptTimesPercentiles = new int[19];
for (int i = 0; i < 19; ++i) {
attemptTimesPercentiles[i] = (i + 1) * 5;
}
}
}
/**
* Process one {@link HistoryEvent}
*
* @param event
* The {@link HistoryEvent} to be processed.
*/
public void process(HistoryEvent event) {
if (finalized) {
throw new IllegalStateException(
"JobBuilder.process(HistoryEvent event) called after ParsedJob built");
}
// these are in lexicographical order by class name.
if (event instanceof AMStartedEvent) {
      // ignore this event, as Rumen currently doesn't need it
//TODO Enhance Rumen to process this event and capture restarts
return;
} else if (event instanceof NormalizedResourceEvent) {
      // Log a warn message, as NormalizedResourceEvent shouldn't be written.
LOG.warn("NormalizedResourceEvent should be ignored in history server.");
} else if (event instanceof JobFinishedEvent) {
processJobFinishedEvent((JobFinishedEvent) event);
} else if (event instanceof JobInfoChangeEvent) {
processJobInfoChangeEvent((JobInfoChangeEvent) event);
} else if (event instanceof JobInitedEvent) {
processJobInitedEvent((JobInitedEvent) event);
} else if (event instanceof JobPriorityChangeEvent) {
processJobPriorityChangeEvent((JobPriorityChangeEvent) event);
} else if (event instanceof JobQueueChangeEvent) {
processJobQueueChangeEvent((JobQueueChangeEvent) event);
} else if (event instanceof JobStatusChangedEvent) {
processJobStatusChangedEvent((JobStatusChangedEvent) event);
} else if (event instanceof JobSubmittedEvent) {
processJobSubmittedEvent((JobSubmittedEvent) event);
} else if (event instanceof JobUnsuccessfulCompletionEvent) {
processJobUnsuccessfulCompletionEvent((JobUnsuccessfulCompletionEvent) event);
} else if (event instanceof MapAttemptFinishedEvent) {
processMapAttemptFinishedEvent((MapAttemptFinishedEvent) event);
} else if (event instanceof ReduceAttemptFinishedEvent) {
processReduceAttemptFinishedEvent((ReduceAttemptFinishedEvent) event);
} else if (event instanceof TaskAttemptFinishedEvent) {
processTaskAttemptFinishedEvent((TaskAttemptFinishedEvent) event);
} else if (event instanceof TaskAttemptStartedEvent) {
processTaskAttemptStartedEvent((TaskAttemptStartedEvent) event);
} else if (event instanceof TaskAttemptUnsuccessfulCompletionEvent) {
processTaskAttemptUnsuccessfulCompletionEvent((TaskAttemptUnsuccessfulCompletionEvent) event);
} else if (event instanceof TaskFailedEvent) {
processTaskFailedEvent((TaskFailedEvent) event);
} else if (event instanceof TaskFinishedEvent) {
processTaskFinishedEvent((TaskFinishedEvent) event);
} else if (event instanceof TaskStartedEvent) {
processTaskStartedEvent((TaskStartedEvent) event);
} else if (event instanceof TaskUpdatedEvent) {
processTaskUpdatedEvent((TaskUpdatedEvent) event);
} else
throw new IllegalArgumentException(
"JobBuilder.process(HistoryEvent): unknown event type:"
+ event.getEventType() + " for event:" + event);
}
static String extract(Properties conf, String[] names, String defaultValue) {
for (String name : names) {
String result = conf.getProperty(name);
if (result != null) {
return result;
}
}
return defaultValue;
}
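  // Scan the configured java options for -Xmx settings; the last match wins
  // and is converted to megabytes. Returns null when no java options or no
  // -Xmx setting is present.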
private Integer extractMegabytes(Properties conf, String[] names) {
String javaOptions = extract(conf, names, null);
if (javaOptions == null) {
return null;
}
Matcher matcher = heapPattern.matcher(javaOptions);
Integer heapMegabytes = null;
while (matcher.find()) {
String heapSize = matcher.group(1);
heapMegabytes =
((int) (StringUtils.TraditionalBinaryPrefix.string2long(heapSize) / BYTES_IN_MEG));
}
return heapMegabytes;
}
private void maybeSetHeapMegabytes(Integer megabytes) {
if (megabytes != null) {
result.setHeapMegabytes(megabytes);
}
}
private void maybeSetJobMapMB(Integer megabytes) {
if (megabytes != null) {
result.setJobMapMB(megabytes);
}
}
private void maybeSetJobReduceMB(Integer megabytes) {
if (megabytes != null) {
result.setJobReduceMB(megabytes);
}
}
/**
* Process a collection of JobConf {@link Properties}. We do not restrict it
* to be called once. It is okay to process a conf before, during or after the
* events.
*
* @param conf
* The job conf properties to be added.
*/
public void process(Properties conf) {
if (finalized) {
throw new IllegalStateException(
"JobBuilder.process(Properties conf) called after ParsedJob built");
}
//TODO remove this once the deprecate APIs in LoggedJob are removed
String queue = extract(conf, JobConfPropertyNames.QUEUE_NAMES
.getCandidates(), null);
// set the queue name if existing
if (queue != null) {
result.setQueue(queue);
}
result.setJobName(extract(conf, JobConfPropertyNames.JOB_NAMES
.getCandidates(), null));
maybeSetHeapMegabytes(extractMegabytes(conf,
JobConfPropertyNames.TASK_JAVA_OPTS_S.getCandidates()));
maybeSetJobMapMB(extractMegabytes(conf,
JobConfPropertyNames.MAP_JAVA_OPTS_S.getCandidates()));
maybeSetJobReduceMB(extractMegabytes(conf,
JobConfPropertyNames.REDUCE_JAVA_OPTS_S.getCandidates()));
this.jobConfigurationParameters = conf;
}
/**
* Request the builder to build the final object. Once called, the
* {@link JobBuilder} would accept no more events or job-conf properties.
*
* @return Parsed {@link ParsedJob} object.
*/
public ParsedJob build() {
// The main job here is to build CDFs and manage the conf
finalized = true;
// set the conf
if (jobConfigurationParameters != null) {
result.setJobProperties(jobConfigurationParameters);
}
// initialize all the per-job statistics gathering places
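    // (one attempt-time histogram per topology distance, plus one extra slot
    // for attempts whose distance could not be determined)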
Histogram[] successfulMapAttemptTimes =
new Histogram[ParsedHost.numberOfDistances() + 1];
for (int i = 0; i < successfulMapAttemptTimes.length; ++i) {
successfulMapAttemptTimes[i] = new Histogram();
}
Histogram successfulReduceAttemptTimes = new Histogram();
Histogram[] failedMapAttemptTimes =
new Histogram[ParsedHost.numberOfDistances() + 1];
for (int i = 0; i < failedMapAttemptTimes.length; ++i) {
failedMapAttemptTimes[i] = new Histogram();
}
Histogram failedReduceAttemptTimes = new Histogram();
Histogram successfulNthMapperAttempts = new Histogram();
// Histogram successfulNthReducerAttempts = new Histogram();
// Histogram mapperLocality = new Histogram();
for (LoggedTask task : result.getMapTasks()) {
for (LoggedTaskAttempt attempt : task.getAttempts()) {
int distance = successfulMapAttemptTimes.length - 1;
Long runtime = null;
if (attempt.getFinishTime() > 0 && attempt.getStartTime() > 0) {
runtime = attempt.getFinishTime() - attempt.getStartTime();
if (attempt.getResult() == Values.SUCCESS) {
LoggedLocation host = attempt.getLocation();
List<LoggedLocation> locs = task.getPreferredLocations();
if (host != null && locs != null) {
for (LoggedLocation loc : locs) {
ParsedHost preferedLoc = new ParsedHost(loc);
distance =
Math.min(distance, preferedLoc
.distance(new ParsedHost(host)));
}
// mapperLocality.enter(distance);
}
if (attempt.getStartTime() > 0 && attempt.getFinishTime() > 0) {
if (runtime != null) {
successfulMapAttemptTimes[distance].enter(runtime);
}
}
TaskAttemptID attemptID = attempt.getAttemptID();
if (attemptID != null) {
successfulNthMapperAttempts.enter(attemptID.getId());
}
} else {
if (attempt.getResult() == Pre21JobHistoryConstants.Values.FAILED) {
if (runtime != null) {
failedMapAttemptTimes[distance].enter(runtime);
}
}
}
}
}
}
for (LoggedTask task : result.getReduceTasks()) {
for (LoggedTaskAttempt attempt : task.getAttempts()) {
        // Only record a runtime when both timestamps are present, mirroring
        // the handling of map attempts above.
        Long runtime = null;
        if (attempt.getFinishTime() > 0 && attempt.getStartTime() > 0) {
          runtime = attempt.getFinishTime() - attempt.getStartTime();
        }
        if (attempt.getResult() == Values.SUCCESS) {
          if (runtime != null) {
            successfulReduceAttemptTimes.enter(runtime);
          }
        } else if (attempt.getResult() == Pre21JobHistoryConstants.Values.FAILED) {
          if (runtime != null) {
            failedReduceAttemptTimes.enter(runtime);
          }
        }
}
}
result.setFailedMapAttemptCDFs(mapCDFArrayList(failedMapAttemptTimes));
LoggedDiscreteCDF failedReduce = new LoggedDiscreteCDF();
failedReduce.setCDF(failedReduceAttemptTimes, attemptTimesPercentiles, 100);
result.setFailedReduceAttemptCDF(failedReduce);
result
.setSuccessfulMapAttemptCDFs(mapCDFArrayList(successfulMapAttemptTimes));
LoggedDiscreteCDF succReduce = new LoggedDiscreteCDF();
succReduce.setCDF(successfulReduceAttemptTimes, attemptTimesPercentiles,
100);
result.setSuccessfulReduceAttemptCDF(succReduce);
long totalSuccessfulAttempts = 0L;
long maxTriesToSucceed = 0L;
for (Map.Entry<Long, Long> ent : successfulNthMapperAttempts) {
totalSuccessfulAttempts += ent.getValue();
maxTriesToSucceed = Math.max(maxTriesToSucceed, ent.getKey());
}
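    // successAfterI[i] will hold the fraction of successful map attempts that
    // succeeded on attempt number i.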
if (totalSuccessfulAttempts > 0L) {
double[] successAfterI = new double[(int) maxTriesToSucceed + 1];
for (int i = 0; i < successAfterI.length; ++i) {
successAfterI[i] = 0.0D;
}
for (Map.Entry<Long, Long> ent : successfulNthMapperAttempts) {
successAfterI[ent.getKey().intValue()] =
((double) ent.getValue()) / totalSuccessfulAttempts;
}
result.setMapperTriesToSucceed(successAfterI);
} else {
result.setMapperTriesToSucceed(null);
}
return result;
}
private ArrayList<LoggedDiscreteCDF> mapCDFArrayList(Histogram[] data) {
ArrayList<LoggedDiscreteCDF> result = new ArrayList<LoggedDiscreteCDF>();
for (Histogram hist : data) {
LoggedDiscreteCDF discCDF = new LoggedDiscreteCDF();
discCDF.setCDF(hist, attemptTimesPercentiles, 100);
result.add(discCDF);
}
return result;
}
private static Values getPre21Value(String name) {
if (name.equalsIgnoreCase("JOB_CLEANUP")) {
return Values.CLEANUP;
}
if (name.equalsIgnoreCase("JOB_SETUP")) {
return Values.SETUP;
}
// Note that pre-21, the task state of a successful task was logged as
    // SUCCESS, while from 21 onwards it's logged as SUCCEEDED.
if (name.equalsIgnoreCase(TaskStatus.State.SUCCEEDED.toString())) {
return Values.SUCCESS;
}
return Values.valueOf(StringUtils.toUpperCase(name));
}
private void processTaskUpdatedEvent(TaskUpdatedEvent event) {
ParsedTask task = getTask(event.getTaskId().toString());
if (task == null) {
return;
}
task.setFinishTime(event.getFinishTime());
}
private void processTaskStartedEvent(TaskStartedEvent event) {
ParsedTask task =
getOrMakeTask(event.getTaskType(), event.getTaskId().toString(), true);
task.setStartTime(event.getStartTime());
task.setPreferredLocations(preferredLocationForSplits(event
.getSplitLocations()));
}
private void processTaskFinishedEvent(TaskFinishedEvent event) {
ParsedTask task =
getOrMakeTask(event.getTaskType(), event.getTaskId().toString(), false);
if (task == null) {
return;
}
task.setFinishTime(event.getFinishTime());
task.setTaskStatus(getPre21Value(event.getTaskStatus()));
task.incorporateCounters(((TaskFinished) event.getDatum()).counters);
}
private void processTaskFailedEvent(TaskFailedEvent event) {
ParsedTask task =
getOrMakeTask(event.getTaskType(), event.getTaskId().toString(), false);
if (task == null) {
return;
}
task.setFinishTime(event.getFinishTime());
task.setTaskStatus(getPre21Value(event.getTaskStatus()));
TaskFailed t = (TaskFailed)(event.getDatum());
task.putDiagnosticInfo(t.error.toString());
task.putFailedDueToAttemptId(t.failedDueToAttempt.toString());
org.apache.hadoop.mapreduce.jobhistory.JhCounters counters =
((TaskFailed) event.getDatum()).counters;
task.incorporateCounters(
counters == null ? EMPTY_COUNTERS : counters);
}
private void processTaskAttemptUnsuccessfulCompletionEvent(
TaskAttemptUnsuccessfulCompletionEvent event) {
ParsedTaskAttempt attempt =
getOrMakeTaskAttempt(event.getTaskType(), event.getTaskId().toString(),
event.getTaskAttemptId().toString());
if (attempt == null) {
return;
}
attempt.setResult(getPre21Value(event.getTaskStatus()));
attempt.setHostName(event.getHostname(), event.getRackName());
ParsedHost pHost =
getAndRecordParsedHost(event.getRackName(), event.getHostname());
if (pHost != null) {
attempt.setLocation(pHost.makeLoggedLocation());
}
attempt.setFinishTime(event.getFinishTime());
org.apache.hadoop.mapreduce.jobhistory.JhCounters counters =
((TaskAttemptUnsuccessfulCompletion) event.getDatum()).counters;
attempt.incorporateCounters(
counters == null ? EMPTY_COUNTERS : counters);
attempt.arraySetClockSplits(event.getClockSplits());
attempt.arraySetCpuUsages(event.getCpuUsages());
attempt.arraySetVMemKbytes(event.getVMemKbytes());
attempt.arraySetPhysMemKbytes(event.getPhysMemKbytes());
TaskAttemptUnsuccessfulCompletion t =
(TaskAttemptUnsuccessfulCompletion) (event.getDatum());
attempt.putDiagnosticInfo(t.error.toString());
}
private void processTaskAttemptStartedEvent(TaskAttemptStartedEvent event) {
ParsedTaskAttempt attempt =
getOrMakeTaskAttempt(event.getTaskType(), event.getTaskId().toString(),
event.getTaskAttemptId().toString());
if (attempt == null) {
return;
}
attempt.setStartTime(event.getStartTime());
attempt.putTrackerName(event.getTrackerName());
attempt.putHttpPort(event.getHttpPort());
attempt.putShufflePort(event.getShufflePort());
}
private void processTaskAttemptFinishedEvent(TaskAttemptFinishedEvent event) {
ParsedTaskAttempt attempt =
getOrMakeTaskAttempt(event.getTaskType(), event.getTaskId().toString(),
event.getAttemptId().toString());
if (attempt == null) {
return;
}
attempt.setResult(getPre21Value(event.getTaskStatus()));
ParsedHost pHost = getAndRecordParsedHost(event.getRackName(), event.getHostname());
if (pHost != null) {
attempt.setLocation(pHost.makeLoggedLocation());
}
attempt.setFinishTime(event.getFinishTime());
attempt
.incorporateCounters(((TaskAttemptFinished) event.getDatum()).counters);
}
private void processReduceAttemptFinishedEvent(
ReduceAttemptFinishedEvent event) {
ParsedTaskAttempt attempt =
getOrMakeTaskAttempt(event.getTaskType(), event.getTaskId().toString(),
event.getAttemptId().toString());
if (attempt == null) {
return;
}
attempt.setResult(getPre21Value(event.getTaskStatus()));
attempt.setHostName(event.getHostname(), event.getRackName());
ParsedHost pHost =
getAndRecordParsedHost(event.getRackName(), event.getHostname());
if (pHost != null) {
attempt.setLocation(pHost.makeLoggedLocation());
}
// XXX There may be redundant location info available in the event.
// We might consider extracting it from this event. Currently this
    // is redundant, but doing so would add future-proofing.
attempt.setFinishTime(event.getFinishTime());
attempt.setShuffleFinished(event.getShuffleFinishTime());
attempt.setSortFinished(event.getSortFinishTime());
attempt
.incorporateCounters(((ReduceAttemptFinished) event.getDatum()).counters);
attempt.arraySetClockSplits(event.getClockSplits());
attempt.arraySetCpuUsages(event.getCpuUsages());
attempt.arraySetVMemKbytes(event.getVMemKbytes());
attempt.arraySetPhysMemKbytes(event.getPhysMemKbytes());
}
private void processMapAttemptFinishedEvent(MapAttemptFinishedEvent event) {
ParsedTaskAttempt attempt =
getOrMakeTaskAttempt(event.getTaskType(), event.getTaskId().toString(),
event.getAttemptId().toString());
if (attempt == null) {
return;
}
attempt.setResult(getPre21Value(event.getTaskStatus()));
attempt.setHostName(event.getHostname(), event.getRackName());
ParsedHost pHost =
getAndRecordParsedHost(event.getRackName(), event.getHostname());
if (pHost != null) {
attempt.setLocation(pHost.makeLoggedLocation());
}
// XXX There may be redundant location info available in the event.
// We might consider extracting it from this event. Currently this
    // is redundant, but doing so would add future-proofing.
attempt.setFinishTime(event.getFinishTime());
attempt
.incorporateCounters(((MapAttemptFinished) event.getDatum()).counters);
attempt.arraySetClockSplits(event.getClockSplits());
attempt.arraySetCpuUsages(event.getCpuUsages());
attempt.arraySetVMemKbytes(event.getVMemKbytes());
attempt.arraySetPhysMemKbytes(event.getPhysMemKbytes());
}
private void processJobUnsuccessfulCompletionEvent(
JobUnsuccessfulCompletionEvent event) {
result.setOutcome(Pre21JobHistoryConstants.Values
.valueOf(event.getStatus()));
result.setFinishTime(event.getFinishTime());
// No counters in JobUnsuccessfulCompletionEvent
}
private void processJobSubmittedEvent(JobSubmittedEvent event) {
result.setJobID(event.getJobId().toString());
result.setJobName(event.getJobName());
result.setUser(event.getUserName());
result.setSubmitTime(event.getSubmitTime());
result.putJobConfPath(event.getJobConfPath());
result.putJobAcls(event.getJobAcls());
// set the queue name if existing
String queue = event.getJobQueueName();
if (queue != null) {
result.setQueue(queue);
}
}
private void processJobQueueChangeEvent(JobQueueChangeEvent event) {
// set the queue name if existing
String queue = event.getJobQueueName();
if (queue != null) {
result.setQueue(queue);
}
}
private void processJobStatusChangedEvent(JobStatusChangedEvent event) {
result.setOutcome(Pre21JobHistoryConstants.Values
.valueOf(event.getStatus()));
}
private void processJobPriorityChangeEvent(JobPriorityChangeEvent event) {
result.setPriority(LoggedJob.JobPriority.valueOf(event.getPriority()
.toString()));
}
private void processJobInitedEvent(JobInitedEvent event) {
result.setLaunchTime(event.getLaunchTime());
result.setTotalMaps(event.getTotalMaps());
result.setTotalReduces(event.getTotalReduces());
}
private void processJobInfoChangeEvent(JobInfoChangeEvent event) {
result.setLaunchTime(event.getLaunchTime());
}
private void processJobFinishedEvent(JobFinishedEvent event) {
result.setFinishTime(event.getFinishTime());
result.setJobID(jobID);
result.setOutcome(Values.SUCCESS);
JobFinished job = (JobFinished)event.getDatum();
Map<String, Long> countersMap =
JobHistoryUtils.extractCounters(job.totalCounters);
result.putTotalCounters(countersMap);
countersMap = JobHistoryUtils.extractCounters(job.mapCounters);
result.putMapCounters(countersMap);
countersMap = JobHistoryUtils.extractCounters(job.reduceCounters);
result.putReduceCounters(countersMap);
}
private ParsedTask getTask(String taskIDname) {
ParsedTask result = mapTasks.get(taskIDname);
if (result != null) {
return result;
}
result = reduceTasks.get(taskIDname);
if (result != null) {
return result;
}
return otherTasks.get(taskIDname);
}
/**
* @param type
* the task type
* @param taskIDname
* the task ID name, as a string
* @param allowCreate
* if true, we can create a task.
   * @return the named task, created on demand when allowCreate is true;
   *         null if the task is unknown and allowCreate is false.
*/
private ParsedTask getOrMakeTask(TaskType type, String taskIDname,
boolean allowCreate) {
Map<String, ParsedTask> taskMap = otherTasks;
List<LoggedTask> tasks = this.result.getOtherTasks();
switch (type) {
case MAP:
taskMap = mapTasks;
tasks = this.result.getMapTasks();
break;
case REDUCE:
taskMap = reduceTasks;
tasks = this.result.getReduceTasks();
break;
default:
// no code
}
ParsedTask result = taskMap.get(taskIDname);
if (result == null && allowCreate) {
result = new ParsedTask();
result.setTaskType(getPre21Value(type.toString()));
result.setTaskID(taskIDname);
taskMap.put(taskIDname, result);
tasks.add(result);
}
return result;
}
private ParsedTaskAttempt getOrMakeTaskAttempt(TaskType type,
String taskIDName, String taskAttemptName) {
ParsedTask task = getOrMakeTask(type, taskIDName, false);
ParsedTaskAttempt result = attempts.get(taskAttemptName);
if (result == null && task != null) {
result = new ParsedTaskAttempt();
result.setAttemptID(taskAttemptName);
attempts.put(taskAttemptName, result);
task.getAttempts().add(result);
}
return result;
}
private ParsedHost getAndRecordParsedHost(String hostName) {
return getAndRecordParsedHost(null, hostName);
}
private ParsedHost getAndRecordParsedHost(String rackName, String hostName) {
ParsedHost result = null;
if (rackName == null) {
// for old (pre-23) job history files where hostname was represented as
// /rackname/hostname
result = ParsedHost.parse(hostName);
} else {
// for new (post-23) job history files
result = new ParsedHost(rackName, hostName);
}
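    // Intern the parsed host so that repeated references to the same host
    // share one canonical ParsedHost instance.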
if (result != null) {
ParsedHost canonicalResult = allHosts.get(result);
if (canonicalResult != null) {
return canonicalResult;
}
allHosts.put(result, result);
return result;
}
return null;
}
private ArrayList<LoggedLocation> preferredLocationForSplits(String splits) {
if (splits != null) {
ArrayList<LoggedLocation> locations = null;
StringTokenizer tok = new StringTokenizer(splits, ",", false);
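      // A split list longer than MAXIMUM_PREFERRED_LOCATIONS is ignored
      // entirely rather than truncated.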
if (tok.countTokens() <= MAXIMUM_PREFERRED_LOCATIONS) {
locations = new ArrayList<LoggedLocation>();
while (tok.hasMoreTokens()) {
String nextSplit = tok.nextToken();
ParsedHost node = getAndRecordParsedHost(nextSplit);
if (locations != null && node != null) {
locations.add(node.makeLoggedLocation());
}
}
return locations;
}
}
return null;
}
}
| 28,415 | 34.609023 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/TaskAttempt20LineEventEmitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.text.ParseException;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptFinishedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptStartedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletionEvent;
public abstract class TaskAttempt20LineEventEmitter extends HistoryEventEmitter {
static List<SingleEventEmitter> taskEventNonFinalSEEs =
new LinkedList<SingleEventEmitter>();
static List<SingleEventEmitter> taskEventFinalSEEs =
new LinkedList<SingleEventEmitter>();
static private final int DEFAULT_HTTP_PORT = 80;
Long originalStartTime = null;
org.apache.hadoop.mapreduce.TaskType originalTaskType = null;
static {
taskEventNonFinalSEEs.add(new TaskAttemptStartedEventEmitter());
taskEventNonFinalSEEs.add(new TaskAttemptFinishedEventEmitter());
taskEventNonFinalSEEs
.add(new TaskAttemptUnsuccessfulCompletionEventEmitter());
}
protected TaskAttempt20LineEventEmitter() {
super();
}
static private class TaskAttemptStartedEventEmitter extends
SingleEventEmitter {
HistoryEvent maybeEmitEvent(ParsedLine line, String taskAttemptIDName,
HistoryEventEmitter thatg) {
if (taskAttemptIDName == null) {
return null;
}
TaskAttemptID taskAttemptID = TaskAttemptID.forName(taskAttemptIDName);
String startTime = line.get("START_TIME");
String taskType = line.get("TASK_TYPE");
String trackerName = line.get("TRACKER_NAME");
String httpPort = line.get("HTTP_PORT");
String locality = line.get("LOCALITY");
if (locality == null) {
locality = "";
}
String avataar = line.get("AVATAAR");
if (avataar == null) {
avataar = "";
}
if (startTime != null && taskType != null) {
TaskAttempt20LineEventEmitter that =
(TaskAttempt20LineEventEmitter) thatg;
that.originalStartTime = Long.parseLong(startTime);
that.originalTaskType =
Version20LogInterfaceUtils.get20TaskType(taskType);
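        // Fall back to DEFAULT_HTTP_PORT when the log line carries an empty
        // HTTP_PORT field.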
int port =
httpPort.equals("") ? DEFAULT_HTTP_PORT : Integer
.parseInt(httpPort);
return new TaskAttemptStartedEvent(taskAttemptID,
that.originalTaskType, that.originalStartTime, trackerName, port, -1,
locality, avataar);
}
return null;
}
}
static private class TaskAttemptFinishedEventEmitter extends
SingleEventEmitter {
HistoryEvent maybeEmitEvent(ParsedLine line, String taskAttemptIDName,
HistoryEventEmitter thatg) {
if (taskAttemptIDName == null) {
return null;
}
TaskAttemptID taskAttemptID = TaskAttemptID.forName(taskAttemptIDName);
String finishTime = line.get("FINISH_TIME");
String status = line.get("TASK_STATUS");
if (finishTime != null && status != null
&& status.equalsIgnoreCase("success")) {
String hostName = line.get("HOSTNAME");
String counters = line.get("COUNTERS");
String state = line.get("STATE_STRING");
TaskAttempt20LineEventEmitter that =
(TaskAttempt20LineEventEmitter) thatg;
ParsedHost pHost = ParsedHost.parse(hostName);
return new TaskAttemptFinishedEvent(taskAttemptID,
that.originalTaskType, status, Long.parseLong(finishTime),
pHost.getRackName(), pHost.getNodeName(), state,
maybeParseCounters(counters));
}
return null;
}
}
static private class TaskAttemptUnsuccessfulCompletionEventEmitter extends
SingleEventEmitter {
HistoryEvent maybeEmitEvent(ParsedLine line, String taskAttemptIDName,
HistoryEventEmitter thatg) {
if (taskAttemptIDName == null) {
return null;
}
TaskAttemptID taskAttemptID = TaskAttemptID.forName(taskAttemptIDName);
String finishTime = line.get("FINISH_TIME");
String status = line.get("TASK_STATUS");
if (finishTime != null && status != null
&& !status.equalsIgnoreCase("success")) {
String hostName = line.get("HOSTNAME");
String error = line.get("ERROR");
TaskAttempt20LineEventEmitter that =
(TaskAttempt20LineEventEmitter) thatg;
ParsedHost pHost = ParsedHost.parse(hostName);
String rackName = null;
        // Earlier versions of MR logged only hostnames (without rackname) for
        // unsuccessful attempts.
if (pHost != null) {
rackName = pHost.getRackName();
hostName = pHost.getNodeName();
}
return new TaskAttemptUnsuccessfulCompletionEvent
(taskAttemptID,
that.originalTaskType, status, Long.parseLong(finishTime),
hostName, -1, rackName, error, null);
}
return null;
}
}
}
| 5,919 | 33.418605 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/CDFPiecewiseLinearRandomGenerator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
public class CDFPiecewiseLinearRandomGenerator extends CDFRandomGenerator {
/**
* @param cdf
* builds a CDFRandomValue engine around this
   *          {@link LoggedDiscreteCDF}, with a default-seeded RNG
*/
public CDFPiecewiseLinearRandomGenerator(LoggedDiscreteCDF cdf) {
super(cdf);
}
/**
* @param cdf
* builds a CDFRandomValue engine around this
* {@link LoggedDiscreteCDF}, with an explicitly seeded RNG
* @param seed
* the random number generator seed
*/
public CDFPiecewiseLinearRandomGenerator(LoggedDiscreteCDF cdf, long seed) {
super(cdf, seed);
}
/**
* TODO This code assumes that the empirical minimum resp. maximum is the
   * epistemological minimum resp. maximum. This is probably okay for the
* minimum, because that likely represents a task where everything went well,
* but for the maximum we may want to develop a way of extrapolating past the
* maximum.
*/
@Override
public long valueAt(double probability) {
int rangeFloor = floorIndex(probability);
double segmentProbMin = getRankingAt(rangeFloor);
double segmentProbMax = getRankingAt(rangeFloor + 1);
long segmentMinValue = getDatumAt(rangeFloor);
long segmentMaxValue = getDatumAt(rangeFloor + 1);
// If this is zero, this object is based on an ill-formed cdf
double segmentProbRange = segmentProbMax - segmentProbMin;
long segmentDatumRange = segmentMaxValue - segmentMinValue;
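    // Linear interpolation within the segment:
    // value = segmentMin + (p - probMin) / (probMax - probMin) * (datumMax - datumMin)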
long result = (long) ((probability - segmentProbMin) / segmentProbRange * segmentDatumRange)
+ segmentMinValue;
return result;
}
}
| 2,496 | 35.188406 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Job20LineHistoryEventEmitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.mapred.JobPriority;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobFinishedEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobInfoChangeEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobInitedEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobPriorityChangeEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobStatusChangedEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobSubmittedEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobUnsuccessfulCompletionEvent;
import org.apache.hadoop.security.authorize.AccessControlList;
public class Job20LineHistoryEventEmitter extends HistoryEventEmitter {
static List<SingleEventEmitter> nonFinals =
new LinkedList<SingleEventEmitter>();
static List<SingleEventEmitter> finals = new LinkedList<SingleEventEmitter>();
Long originalSubmitTime = null;
static {
nonFinals.add(new JobSubmittedEventEmitter());
nonFinals.add(new JobPriorityChangeEventEmitter());
nonFinals.add(new JobStatusChangedEventEmitter());
nonFinals.add(new JobInitedEventEmitter());
nonFinals.add(new JobInfoChangeEventEmitter());
finals.add(new JobUnsuccessfulCompletionEventEmitter());
finals.add(new JobFinishedEventEmitter());
}
Job20LineHistoryEventEmitter() {
super();
}
static private class JobSubmittedEventEmitter extends SingleEventEmitter {
HistoryEvent maybeEmitEvent(ParsedLine line, String jobIDName,
HistoryEventEmitter thatg) {
      if (jobIDName == null) {
        return null;
      }
      JobID jobID = JobID.forName(jobIDName);
String submitTime = line.get("SUBMIT_TIME");
String jobConf = line.get("JOBCONF");
String user = line.get("USER");
if (user == null) {
user = "nulluser";
}
String jobName = line.get("JOBNAME");
String jobQueueName = line.get("JOB_QUEUE");// could be null
String workflowId = line.get("WORKFLOW_ID");
if (workflowId == null) {
workflowId = "";
}
String workflowName = line.get("WORKFLOW_NAME");
if (workflowName == null) {
workflowName = "";
}
String workflowNodeName = line.get("WORKFLOW_NODE_NAME");
if (workflowNodeName == null) {
workflowNodeName = "";
}
String workflowAdjacencies = line.get("WORKFLOW_ADJACENCIES");
if (workflowAdjacencies == null) {
workflowAdjacencies = "";
}
String workflowTags = line.get("WORKFLOW_TAGS");
if (workflowTags == null) {
workflowTags = "";
}
if (submitTime != null) {
Job20LineHistoryEventEmitter that =
(Job20LineHistoryEventEmitter) thatg;
that.originalSubmitTime = Long.parseLong(submitTime);
Map<JobACL, AccessControlList> jobACLs =
new HashMap<JobACL, AccessControlList>();
return new JobSubmittedEvent(jobID, jobName, user,
that.originalSubmitTime, jobConf, jobACLs, jobQueueName,
workflowId, workflowName, workflowNodeName, workflowAdjacencies,
workflowTags);
}
return null;
}
}
static private class JobPriorityChangeEventEmitter extends SingleEventEmitter {
HistoryEvent maybeEmitEvent(ParsedLine line, String jobIDName,
HistoryEventEmitter thatg) {
      if (jobIDName == null) {
        return null;
      }
      JobID jobID = JobID.forName(jobIDName);
String priority = line.get("JOB_PRIORITY");
if (priority != null) {
return new JobPriorityChangeEvent(jobID, JobPriority.valueOf(priority));
}
return null;
}
}
static private class JobInitedEventEmitter extends SingleEventEmitter {
HistoryEvent maybeEmitEvent(ParsedLine line, String jobIDName,
HistoryEventEmitter thatg) {
if (jobIDName == null) {
return null;
}
JobID jobID = JobID.forName(jobIDName);
String launchTime = line.get("LAUNCH_TIME");
String status = line.get("JOB_STATUS");
String totalMaps = line.get("TOTAL_MAPS");
String totalReduces = line.get("TOTAL_REDUCES");
String uberized = line.get("UBERIZED");
if (launchTime != null && totalMaps != null && totalReduces != null) {
return new JobInitedEvent(jobID, Long.parseLong(launchTime), Integer
.parseInt(totalMaps), Integer.parseInt(totalReduces), status,
Boolean.parseBoolean(uberized));
}
return null;
}
}
static private class JobStatusChangedEventEmitter extends SingleEventEmitter {
HistoryEvent maybeEmitEvent(ParsedLine line, String jobIDName,
HistoryEventEmitter thatg) {
if (jobIDName == null) {
return null;
}
JobID jobID = JobID.forName(jobIDName);
String status = line.get("JOB_STATUS");
if (status != null) {
return new JobStatusChangedEvent(jobID, status);
}
return null;
}
}
static private class JobInfoChangeEventEmitter extends SingleEventEmitter {
HistoryEvent maybeEmitEvent(ParsedLine line, String jobIDName,
HistoryEventEmitter thatg) {
if (jobIDName == null) {
return null;
}
JobID jobID = JobID.forName(jobIDName);
String launchTime = line.get("LAUNCH_TIME");
if (launchTime != null) {
Job20LineHistoryEventEmitter that =
(Job20LineHistoryEventEmitter) thatg;
return new JobInfoChangeEvent(jobID, that.originalSubmitTime, Long
.parseLong(launchTime));
}
return null;
}
}
static private class JobUnsuccessfulCompletionEventEmitter extends
SingleEventEmitter {
HistoryEvent maybeEmitEvent(ParsedLine line, String jobIDName,
HistoryEventEmitter thatg) {
if (jobIDName == null) {
return null;
}
JobID jobID = JobID.forName(jobIDName);
String finishTime = line.get("FINISH_TIME");
String status = line.get("JOB_STATUS");
String finishedMaps = line.get("FINISHED_MAPS");
String finishedReduces = line.get("FINISHED_REDUCES");
if (status != null && !status.equalsIgnoreCase("success")
&& finishTime != null && finishedMaps != null
&& finishedReduces != null) {
return new JobUnsuccessfulCompletionEvent(jobID, Long
.parseLong(finishTime), Integer.parseInt(finishedMaps), Integer
.parseInt(finishedReduces), status);
}
return null;
}
}
static private class JobFinishedEventEmitter extends SingleEventEmitter {
HistoryEvent maybeEmitEvent(ParsedLine line, String jobIDName,
HistoryEventEmitter thatg) {
if (jobIDName == null) {
return null;
}
JobID jobID = JobID.forName(jobIDName);
String finishTime = line.get("FINISH_TIME");
String status = line.get("JOB_STATUS");
String finishedMaps = line.get("FINISHED_MAPS");
String finishedReduces = line.get("FINISHED_REDUCES");
String failedMaps = line.get("FAILED_MAPS");
String failedReduces = line.get("FAILED_REDUCES");
String counters = line.get("COUNTERS");
if (status != null && status.equalsIgnoreCase("success")
&& finishTime != null && finishedMaps != null
&& finishedReduces != null) {
return new JobFinishedEvent(jobID, Long.parseLong(finishTime), Integer
.parseInt(finishedMaps), Integer.parseInt(finishedReduces), Integer
.parseInt(failedMaps), Integer.parseInt(failedReduces), null, null,
maybeParseCounters(counters));
}
return null;
}
}
@Override
List<SingleEventEmitter> finalSEEs() {
return finals;
}
@Override
List<SingleEventEmitter> nonFinalSEEs() {
return nonFinals;
}
}
| 8,898 | 31.010791 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Pre21JobHistoryConstants.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.regex.Pattern;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.v2.hs.JobHistory;
/**
* Job History related constants for Hadoop releases prior to 0.21
*/
public class Pre21JobHistoryConstants {
/**
* Job history files contain key="value" pairs, where keys belong to this enum.
* It acts as a global namespace for all keys.
*/
static enum Keys {
JOBTRACKERID,
START_TIME, FINISH_TIME, JOBID, JOBNAME, USER, JOBCONF, SUBMIT_TIME,
LAUNCH_TIME, TOTAL_MAPS, TOTAL_REDUCES, FAILED_MAPS, FAILED_REDUCES,
FINISHED_MAPS, FINISHED_REDUCES, JOB_STATUS, TASKID, HOSTNAME, TASK_TYPE,
ERROR, TASK_ATTEMPT_ID, TASK_STATUS, COPY_PHASE, SORT_PHASE, REDUCE_PHASE,
SHUFFLE_FINISHED, SORT_FINISHED, MAP_FINISHED, COUNTERS, SPLITS,
JOB_PRIORITY, HTTP_PORT, TRACKER_NAME, STATE_STRING, VERSION
}
/**
* This enum contains some of the values commonly used by history log events.
   * Since values in history can only be strings, Values.name() is used in
   * most places in the history file.
*/
public static enum Values {
SUCCESS, FAILED, KILLED, MAP, REDUCE, CLEANUP, RUNNING, PREP, SETUP
}
/**
* Regex for Pre21 V1(old) jobhistory filename
* i.e jt-identifier_job-id_user-name_job-name
*/
static final Pattern JOBHISTORY_FILENAME_REGEX_V1 =
Pattern.compile("[^.].+_(" + JobID.JOBID_REGEX + ")_.+");
/**
* Regex for Pre21 V2(new) jobhistory filename
* i.e job-id_user-name_job-name
*/
static final Pattern JOBHISTORY_FILENAME_REGEX_V2 =
Pattern.compile("(" + JobID.JOBID_REGEX + ")_.+");
static final String OLD_FULL_SUFFIX_REGEX_STRING =
"(?:\\.[0-9]+" + Pattern.quote(JobHistory.OLD_SUFFIX) + ")";
/**
* Regex for Pre21 V1(old) jobhistory conf filename
* i.e jt-identifier_job-id_conf.xml
*/
static final Pattern CONF_FILENAME_REGEX_V1 =
Pattern.compile("[^.].+_(" + JobID.JOBID_REGEX + ")_conf.xml"
+ OLD_FULL_SUFFIX_REGEX_STRING + "?");
/**
* Regex for Pre21 V2(new) jobhistory conf filename
* i.e job-id_conf.xml
*/
static final Pattern CONF_FILENAME_REGEX_V2 =
Pattern.compile("(" + JobID.JOBID_REGEX + ")_conf.xml"
+ OLD_FULL_SUFFIX_REGEX_STRING + "?");
}
| 3,133 | 35.870588 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobConfigurationParser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.w3c.dom.Node;
import org.w3c.dom.Text;
import org.xml.sax.SAXException;
/**
* {@link JobConfigurationParser} parses the job configuration xml file, and
 * extracts configuration properties. The current implementation uses a DOM
 * parser; switching to a stream parser, which would be more memory efficient,
 * may be postponed to a future release.
*/
public class JobConfigurationParser {
/**
* Parse the job configuration file (as an input stream) and return a
* {@link Properties} collection. The input stream will not be closed after
* return from the call.
*
* @param input
* The input data.
* @return A {@link Properties} collection extracted from the job
* configuration xml.
* @throws IOException
*/
static Properties parse(InputStream input) throws IOException {
Properties result = new Properties();
try {
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
Document doc = db.parse(input);
Element root = doc.getDocumentElement();
if (!"configuration".equals(root.getTagName())) {
System.out.print("root is not a configuration node");
return null;
}
NodeList props = root.getChildNodes();
for (int i = 0; i < props.getLength(); ++i) {
Node propNode = props.item(i);
if (!(propNode instanceof Element))
continue;
Element prop = (Element) propNode;
if (!"property".equals(prop.getTagName())) {
System.out.print("bad conf file: element not <property>");
}
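        // Walk this <property>'s children, picking out <name>, <value> and
        // <final>.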
NodeList fields = prop.getChildNodes();
String attr = null;
String value = null;
@SuppressWarnings("unused")
boolean finalParameter = false;
for (int j = 0; j < fields.getLength(); j++) {
Node fieldNode = fields.item(j);
if (!(fieldNode instanceof Element)) {
continue;
}
Element field = (Element) fieldNode;
if ("name".equals(field.getTagName()) && field.hasChildNodes()) {
attr = ((Text) field.getFirstChild()).getData().trim();
}
if ("value".equals(field.getTagName()) && field.hasChildNodes()) {
value = ((Text) field.getFirstChild()).getData();
}
if ("final".equals(field.getTagName()) && field.hasChildNodes()) {
finalParameter =
"true".equals(((Text) field.getFirstChild()).getData());
}
}
if (attr != null && value != null) {
result.put(attr, value);
}
}
} catch (ParserConfigurationException e) {
return null;
} catch (SAXException e) {
return null;
}
return result;
}
}
| 3,943 | 32.423729 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/RackNode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.Set;
/**
* {@link RackNode} represents a rack node in the cluster topology.
*/
public final class RackNode extends Node {
public RackNode(String name, int level) {
// Hack: ensuring rack name starts with "/".
super(name.startsWith("/") ? name : "/" + name, level);
}
@Override
public synchronized boolean addChild(Node child) {
if (!(child instanceof MachineNode)) {
throw new IllegalArgumentException(
"Only MachineNode can be added to RackNode");
}
return super.addChild(child);
}
/**
* Get the machine nodes that belong to the rack.
* @return The machine nodes that belong to the rack.
*/
@SuppressWarnings({ "cast", "unchecked" })
public Set<MachineNode> getMachinesInRack() {
return (Set<MachineNode>)(Set)getChildren();
}
}
| 1,670 | 33.102041 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/DefaultOutputter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.Compressor;
/**
* The default {@link Outputter} that outputs to a plain file. Compression
* will be applied if the path has the right suffix.
*/
public class DefaultOutputter<T> implements Outputter<T> {
JsonObjectMapperWriter<T> writer;
Compressor compressor;
@Override
public void init(Path path, Configuration conf) throws IOException {
FileSystem fs = path.getFileSystem(conf);
CompressionCodec codec = new CompressionCodecFactory(conf).getCodec(path);
OutputStream output;
if (codec != null) {
compressor = CodecPool.getCompressor(codec);
output = codec.createOutputStream(fs.create(path), compressor);
} else {
output = fs.create(path);
}
writer = new JsonObjectMapperWriter<T>(output,
conf.getBoolean("rumen.output.pretty.print", true));
}
@Override
public void output(T object) throws IOException {
writer.write(object);
}
@Override
public void close() throws IOException {
try {
writer.close();
} finally {
if (compressor != null) {
CodecPool.returnCompressor(compressor);
}
}
}
}
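// --- Illustrative usage sketch (not part of the original file) ---
// Shows the init/output/close lifecycle documented above; the record type,
// path and the surrounding driver code are assumptions made for the example.
// A path ending in a known codec suffix (e.g. ".gz") would make init() pick
// up a compressor automatically.
class DefaultOutputterUsageSketch {
  static <T> void writeAll(Path path, Configuration conf, Iterable<T> records)
      throws IOException {
    DefaultOutputter<T> outputter = new DefaultOutputter<T>();
    outputter.init(path, conf);        // opens the file, possibly compressed
    try {
      for (T record : records) {
        outputter.output(record);      // one JSON object per record
      }
    } finally {
      outputter.close();               // flushes and returns the compressor
    }
  }
}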
| 2,344 | 32.5 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobStoryProducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.Closeable;
import java.io.IOException;
/**
* {@link JobStoryProducer} produces the sequence of {@link JobStory}'s.
*/
public interface JobStoryProducer extends Closeable {
/**
* Get the next job.
 * @return The next job. Or null if no more jobs are available.
* @throws IOException
*/
JobStory getNextJob() throws IOException;
}
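// --- Illustrative consumption sketch (not part of the original file) ---
// 'producer' stands for any concrete JobStoryProducer implementation; how it
// is constructed is outside the scope of this interface and is assumed here.
class JobStoryProducerUsageSketch {
  static int countJobs(JobStoryProducer producer) throws IOException {
    int jobs = 0;
    try {
      while (producer.getNextJob() != null) {   // null marks end of the trace
        ++jobs;
      }
    } finally {
      producer.close();                         // extends Closeable
    }
    return jobs;
  }
}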
| 1,208 | 34.558824 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/TaskInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
public class TaskInfo {
private final long bytesIn;
private final int recsIn;
private final long bytesOut;
private final int recsOut;
private final long maxMemory;
private final ResourceUsageMetrics metrics;
public TaskInfo(long bytesIn, int recsIn, long bytesOut, int recsOut,
long maxMemory) {
this(bytesIn, recsIn, bytesOut, recsOut, maxMemory,
new ResourceUsageMetrics());
}
public TaskInfo(long bytesIn, int recsIn, long bytesOut, int recsOut,
long maxMemory, ResourceUsageMetrics metrics) {
this.bytesIn = bytesIn;
this.recsIn = recsIn;
this.bytesOut = bytesOut;
this.recsOut = recsOut;
this.maxMemory = maxMemory;
this.metrics = metrics;
}
/**
* @return Raw bytes read from the FileSystem into the task. Note that this
* may not always match the input bytes to the task.
*/
public long getInputBytes() {
return bytesIn;
}
/**
* @return Number of records input to this task.
*/
public int getInputRecords() {
return recsIn;
}
/**
* @return Raw bytes written to the destination FileSystem. Note that this may
* not match output bytes.
*/
public long getOutputBytes() {
return bytesOut;
}
/**
* @return Number of records output from this task.
*/
public int getOutputRecords() {
return recsOut;
}
/**
 * @return Memory used by the task, less than or equal to the heap size.
*/
public long getTaskMemory() {
return maxMemory;
}
/**
* @return Resource usage metrics
*/
public ResourceUsageMetrics getResourceUsageMetrics() {
return metrics;
}
}
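// --- Illustrative sketch (not part of the original file) ---
// Exercises the constructors above with made-up figures; the shorter form
// fills in an empty ResourceUsageMetrics on the caller's behalf.
class TaskInfoSketch {
  static TaskInfo sample() {
    // 64 MiB read, 1000 records in, 16 MiB written, 200 records out,
    // 200 MiB task memory cap.
    return new TaskInfo(64L << 20, 1000, 16L << 20, 200, 200L << 20);
  }
}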
| 2,484 | 27.238636 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ParsedHost.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;
import java.util.regex.Matcher;
import org.apache.hadoop.tools.rumen.datatypes.NodeName;
public class ParsedHost {
private final String rackName;
private final String nodeName;
/**
* TODO the following only works for /rack/host format. Change to support
* arbitrary level of network names.
*/
private static final Pattern splitPattern = Pattern
.compile("/([^/]+)/([^/]+)");
/**
* TODO handle arbitrary level of network names.
*/
static int numberOfDistances() {
return 3;
}
String nameComponent(int i) throws IllegalArgumentException {
switch (i) {
case 0:
return rackName;
case 1:
return nodeName;
default:
throw new IllegalArgumentException(
"Host location component index out of range.");
}
}
@Override
public int hashCode() {
return rackName.hashCode() * 17 + nodeName.hashCode();
}
public static ParsedHost parse(String name) {
// separate out the node name
Matcher matcher = splitPattern.matcher(name);
if (!matcher.matches())
return null;
return new ParsedHost(matcher.group(1), matcher.group(2));
}
private String process(String name) {
return name == null
? null
: name.startsWith("/") ? name.substring(1) : name;
}
public ParsedHost(LoggedLocation loc) {
List<NodeName> coordinates = loc.getLayers();
rackName = process(coordinates.get(0).getRackName());
nodeName = process(coordinates.get(1).getHostName());
}
LoggedLocation makeLoggedLocation() {
LoggedLocation result = new LoggedLocation();
List<String> coordinates = new ArrayList<String>();
coordinates.add(rackName);
coordinates.add(nodeName);
result.setLayers(coordinates);
return result;
}
public String getNodeName() {
return nodeName;
}
public String getRackName() {
return rackName;
}
// expects the broadest name first
ParsedHost(String rackName, String nodeName) {
this.rackName = process(rackName);
this.nodeName = process(nodeName);
}
@Override
public boolean equals(Object other) {
if (!(other instanceof ParsedHost)) {
return false;
}
ParsedHost host = (ParsedHost) other;
return (nodeName.equals(host.nodeName) && rackName.equals(host.rackName));
}
int distance(ParsedHost other) {
if (nodeName.equals(other.nodeName)) {
return 0;
}
if (rackName.equals(other.rackName)) {
return 1;
}
return 2;
}
}
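// --- Illustrative sketch (not part of the original file) ---
// Demonstrates the "/rack/host" parsing contract and the three distance
// levels used above (0 = same host, 1 = same rack, 2 = different racks).
// The host names are made up for the example.
class ParsedHostSketch {
  static void demo() {
    ParsedHost a = ParsedHost.parse("/rack-1/node-17");
    ParsedHost b = ParsedHost.parse("/rack-1/node-23");
    ParsedHost c = ParsedHost.parse("/rack-2/node-17");
    // a.getRackName() -> "rack-1", a.getNodeName() -> "node-17"
    System.out.println(a.distance(b));   // 1: same rack, different hosts
    System.out.println(a.distance(c));   // 2: different racks
    System.out.println(ParsedHost.parse("no-topology-here"));   // null
  }
}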
| 3,437 | 24.466667 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedTask.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.jobhistory.JhCounter;
import org.apache.hadoop.mapreduce.jobhistory.JhCounterGroup;
import org.apache.hadoop.mapreduce.jobhistory.JhCounters;
import org.apache.hadoop.util.StringUtils;
import org.codehaus.jackson.annotate.JsonAnySetter;
/**
* A {@link LoggedTask} represents a [hadoop] task that is part of a hadoop job.
 * It knows about the [possibly empty] sequence of attempts, its I/O footprint,
* and its runtime.
*
* All of the public methods are simply accessors for the instance variables we
* want to write out in the JSON files.
*
*/
public class LoggedTask implements DeepCompare {
long inputBytes = -1L;
long inputRecords = -1L;
long outputBytes = -1L;
long outputRecords = -1L;
TaskID taskID;
long startTime = -1L;
long finishTime = -1L;
Pre21JobHistoryConstants.Values taskType;
Pre21JobHistoryConstants.Values taskStatus;
List<LoggedTaskAttempt> attempts = new ArrayList<LoggedTaskAttempt>();
List<LoggedLocation> preferredLocations = Collections.emptyList();
static private Set<String> alreadySeenAnySetterAttributes =
new TreeSet<String>();
  // The second parameter, 'ignored', is deliberately unused.
@JsonAnySetter
public void setUnknownAttribute(String attributeName, Object ignored) {
if (!alreadySeenAnySetterAttributes.contains(attributeName)) {
alreadySeenAnySetterAttributes.add(attributeName);
System.err.println("In LoggedJob, we saw the unknown attribute "
+ attributeName + ".");
}
}
LoggedTask() {
super();
}
void adjustTimes(long adjustment) {
startTime += adjustment;
finishTime += adjustment;
for (LoggedTaskAttempt attempt : attempts) {
attempt.adjustTimes(adjustment);
}
}
public long getInputBytes() {
return inputBytes;
}
void setInputBytes(long inputBytes) {
this.inputBytes = inputBytes;
}
public long getInputRecords() {
return inputRecords;
}
void setInputRecords(long inputRecords) {
this.inputRecords = inputRecords;
}
public long getOutputBytes() {
return outputBytes;
}
void setOutputBytes(long outputBytes) {
this.outputBytes = outputBytes;
}
public long getOutputRecords() {
return outputRecords;
}
void setOutputRecords(long outputRecords) {
this.outputRecords = outputRecords;
}
public TaskID getTaskID() {
return taskID;
}
void setTaskID(String taskID) {
this.taskID = TaskID.forName(taskID);
}
public long getStartTime() {
return startTime;
}
void setStartTime(long startTime) {
this.startTime = startTime;
}
public long getFinishTime() {
return finishTime;
}
void setFinishTime(long finishTime) {
this.finishTime = finishTime;
}
public List<LoggedTaskAttempt> getAttempts() {
return attempts;
}
void setAttempts(List<LoggedTaskAttempt> attempts) {
if (attempts == null) {
this.attempts = new ArrayList<LoggedTaskAttempt>();
} else {
this.attempts = attempts;
}
}
public List<LoggedLocation> getPreferredLocations() {
return preferredLocations;
}
void setPreferredLocations(List<LoggedLocation> preferredLocations) {
if (preferredLocations == null || preferredLocations.isEmpty()) {
this.preferredLocations = Collections.emptyList();
} else {
this.preferredLocations = preferredLocations;
}
}
public Pre21JobHistoryConstants.Values getTaskStatus() {
return taskStatus;
}
void setTaskStatus(Pre21JobHistoryConstants.Values taskStatus) {
this.taskStatus = taskStatus;
}
public Pre21JobHistoryConstants.Values getTaskType() {
return taskType;
}
void setTaskType(Pre21JobHistoryConstants.Values taskType) {
this.taskType = taskType;
}
private void incorporateMapCounters(JhCounters counters) {
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
task.inputBytes = val;
}
}, counters, "HDFS_BYTES_READ");
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
task.outputBytes = val;
}
}, counters, "FILE_BYTES_WRITTEN");
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
task.inputRecords = val;
}
}, counters, "MAP_INPUT_RECORDS");
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
task.outputRecords = val;
}
}, counters, "MAP_OUTPUT_RECORDS");
}
private void incorporateReduceCounters(JhCounters counters) {
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
task.inputBytes = val;
}
}, counters, "REDUCE_SHUFFLE_BYTES");
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
task.outputBytes = val;
}
}, counters, "HDFS_BYTES_WRITTEN");
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
task.inputRecords = val;
}
}, counters, "REDUCE_INPUT_RECORDS");
incorporateCounter(new SetField(this) {
@Override
void set(long val) {
task.outputRecords = val;
}
}, counters, "REDUCE_OUTPUT_RECORDS");
}
// incorporate event counters
// LoggedTask MUST KNOW ITS TYPE BEFORE THIS CALL
public void incorporateCounters(JhCounters counters) {
switch (taskType) {
case MAP:
incorporateMapCounters(counters);
return;
case REDUCE:
incorporateReduceCounters(counters);
return;
// NOT exhaustive
}
}
private static String canonicalizeCounterName(String nonCanonicalName) {
String result = StringUtils.toLowerCase(nonCanonicalName);
result = result.replace(' ', '|');
result = result.replace('-', '|');
result = result.replace('_', '|');
result = result.replace('.', '|');
return result;
}
private abstract class SetField {
LoggedTask task;
SetField(LoggedTask task) {
this.task = task;
}
abstract void set(long value);
}
private static void incorporateCounter(SetField thunk, JhCounters counters,
String counterName) {
counterName = canonicalizeCounterName(counterName);
for (JhCounterGroup group : counters.groups) {
for (JhCounter counter : group.counts) {
if (counterName
.equals(canonicalizeCounterName(counter.name.toString()))) {
thunk.set(counter.value);
return;
}
}
}
}
private void compare1(long c1, long c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 != c2) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
}
private void compare1(String c1, String c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
if (c1 == null || c2 == null || !c1.equals(c2)) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
}
private void compare1(Pre21JobHistoryConstants.Values c1,
Pre21JobHistoryConstants.Values c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
if (c1 == null || c2 == null || !c1.equals(c2)) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
}
private void compareLoggedLocations(List<LoggedLocation> c1,
List<LoggedLocation> c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
if (c1 == null || c2 == null || c1.size() != c2.size()) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
for (int i = 0; i < c1.size(); ++i) {
c1.get(i).deepCompare(c2.get(i), new TreePath(loc, eltname, i));
}
}
private void compareLoggedTaskAttempts(List<LoggedTaskAttempt> c1,
List<LoggedTaskAttempt> c2, TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
if (c1 == null || c2 == null || c1.size() != c2.size()) {
throw new DeepInequalityException(eltname + " miscompared", new TreePath(
loc, eltname));
}
for (int i = 0; i < c1.size(); ++i) {
c1.get(i).deepCompare(c2.get(i), new TreePath(loc, eltname, i));
}
}
public void deepCompare(DeepCompare comparand, TreePath loc)
throws DeepInequalityException {
if (!(comparand instanceof LoggedTask)) {
throw new DeepInequalityException("comparand has wrong type", loc);
}
LoggedTask other = (LoggedTask) comparand;
compare1(inputBytes, other.inputBytes, loc, "inputBytes");
compare1(inputRecords, other.inputRecords, loc, "inputRecords");
compare1(outputBytes, other.outputBytes, loc, "outputBytes");
compare1(outputRecords, other.outputRecords, loc, "outputRecords");
compare1(taskID.toString(), other.taskID.toString(), loc, "taskID");
compare1(startTime, other.startTime, loc, "startTime");
compare1(finishTime, other.finishTime, loc, "finishTime");
compare1(taskType, other.taskType, loc, "taskType");
compare1(taskStatus, other.taskStatus, loc, "taskStatus");
compareLoggedTaskAttempts(attempts, other.attempts, loc, "attempts");
compareLoggedLocations(preferredLocations, other.preferredLocations, loc,
"preferredLocations");
}
}
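// --- Illustrative sketch (not part of the original file) ---
// Re-implements the private canonicalizeCounterName() rule above in isolation
// so the matching behaviour is easy to see: names are lower-cased and the
// separators ' ', '-', '_' and '.' are all mapped to '|', which is why
// "HDFS_BYTES_READ" and "hdfs bytes read" compare equal after canonicalization.
class CounterNameCanonicalizationSketch {
  static String canonicalize(String name) {
    return name.toLowerCase(java.util.Locale.ROOT)
        .replace(' ', '|').replace('-', '|').replace('_', '|').replace('.', '|');
  }
  static boolean sameCounter(String a, String b) {
    return canonicalize(a).equals(canonicalize(b));
  }
}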
| 10,598 | 27.41555 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JsonObjectMapperParser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.Closeable;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.map.DeserializationConfig;
import org.codehaus.jackson.map.ObjectMapper;
/**
* A simple wrapper for parsing JSON-encoded data using ObjectMapper.
*
* @param <T>
* The (base) type of the object(s) to be parsed by this parser.
*/
class JsonObjectMapperParser<T> implements Closeable {
private final ObjectMapper mapper;
private final Class<? extends T> clazz;
private final JsonParser jsonParser;
/**
* Constructor.
*
* @param path
* Path to the JSON data file, possibly compressed.
* @param conf
* @throws IOException
*/
public JsonObjectMapperParser(Path path, Class<? extends T> clazz,
Configuration conf) throws IOException {
mapper = new ObjectMapper();
mapper.configure(
DeserializationConfig.Feature.CAN_OVERRIDE_ACCESS_MODIFIERS, true);
this.clazz = clazz;
InputStream input = new PossiblyDecompressedInputStream(path, conf);
jsonParser = mapper.getJsonFactory().createJsonParser(input);
}
/**
* Constructor.
*
* @param input
* The input stream for the JSON data.
*/
public JsonObjectMapperParser(InputStream input, Class<? extends T> clazz)
throws IOException {
mapper = new ObjectMapper();
mapper.configure(
DeserializationConfig.Feature.CAN_OVERRIDE_ACCESS_MODIFIERS, true);
this.clazz = clazz;
jsonParser = mapper.getJsonFactory().createJsonParser(input);
}
/**
* Get the next object from the trace.
*
 * @return The next instance of the object. Or null if we reach the end of the
 *         stream.
* @throws IOException
*/
public T getNext() throws IOException {
try {
return mapper.readValue(jsonParser, clazz);
} catch (EOFException e) {
return null;
}
}
@Override
public void close() throws IOException {
jsonParser.close();
}
}
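// --- Illustrative usage sketch (not part of the original file) ---
// Drains a JSON trace with the getNext()-until-null contract documented
// above; the caller supplies the InputStream and target class.
class JsonObjectMapperParserUsageSketch {
  static <T> java.util.List<T> readAll(InputStream input, Class<? extends T> clazz)
      throws IOException {
    java.util.List<T> result = new java.util.ArrayList<T>();
    JsonObjectMapperParser<T> parser = new JsonObjectMapperParser<T>(input, clazz);
    try {
      T next;
      while ((next = parser.getNext()) != null) {
        result.add(next);
      }
    } finally {
      parser.close();
    }
    return result;
  }
}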
| 2,961 | 30.178947 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ParsedConfigFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.Properties;
import java.util.regex.Pattern;
import java.util.regex.Matcher;
import java.io.InputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.Charset;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.ParserConfigurationException;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.w3c.dom.Document;
import org.w3c.dom.NodeList;
import org.w3c.dom.Node;
import org.w3c.dom.Element;
import org.w3c.dom.Text;
import org.xml.sax.SAXException;
class ParsedConfigFile {
private static final Pattern jobIDPattern =
Pattern.compile("_(job_[0-9]+_[0-9]+)_");
private static final Pattern heapPattern =
Pattern.compile("-Xmx([0-9]+)([mMgG])");
private static final Charset UTF_8 = Charset.forName("UTF-8");
final int heapMegabytes;
final String queue;
final String jobName;
final int clusterMapMB;
final int clusterReduceMB;
final int jobMapMB;
final int jobReduceMB;
final String jobID;
final boolean valid;
final Properties properties = new Properties();
private int maybeGetIntValue(String propName, String attr, String value,
int oldValue) {
if (propName.equals(attr) && value != null) {
try {
return Integer.parseInt(value);
} catch (NumberFormatException e) {
return oldValue;
}
}
return oldValue;
}
@SuppressWarnings("hiding")
@Deprecated
ParsedConfigFile(String filenameLine, String xmlString) {
super();
int heapMegabytes = -1;
String queue = null;
String jobName = null;
int clusterMapMB = -1;
int clusterReduceMB = -1;
int jobMapMB = -1;
int jobReduceMB = -1;
String jobID = null;
boolean valid = true;
Matcher jobIDMatcher = jobIDPattern.matcher(filenameLine);
if (jobIDMatcher.find()) {
jobID = jobIDMatcher.group(1);
}
try {
InputStream is = new ByteArrayInputStream(xmlString.getBytes(UTF_8));
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
Document doc = db.parse(is);
Element root = doc.getDocumentElement();
if (!"configuration".equals(root.getTagName())) {
System.out.print("root is not a configuration node");
valid = false;
}
NodeList props = root.getChildNodes();
for (int i = 0; i < props.getLength(); ++i) {
Node propNode = props.item(i);
if (!(propNode instanceof Element))
continue;
Element prop = (Element) propNode;
if (!"property".equals(prop.getTagName())) {
System.out.print("bad conf file: element not <property>");
}
NodeList fields = prop.getChildNodes();
String attr = null;
String value = null;
@SuppressWarnings("unused")
boolean finalParameter = false;
for (int j = 0; j < fields.getLength(); j++) {
Node fieldNode = fields.item(j);
if (!(fieldNode instanceof Element)) {
continue;
}
Element field = (Element) fieldNode;
if ("name".equals(field.getTagName()) && field.hasChildNodes()) {
attr = ((Text) field.getFirstChild()).getData().trim();
}
if ("value".equals(field.getTagName()) && field.hasChildNodes()) {
value = ((Text) field.getFirstChild()).getData();
}
if ("final".equals(field.getTagName()) && field.hasChildNodes()) {
finalParameter =
"true".equals(((Text) field.getFirstChild()).getData());
}
}
        if (attr != null && value != null) {
          properties.setProperty(attr, value);
        }
if ("mapred.child.java.opts".equals(attr)) {
Matcher matcher = heapPattern.matcher(value);
if (matcher.find()) {
String heapSize = matcher.group(1);
heapMegabytes = Integer.parseInt(heapSize);
if (matcher.group(2).equalsIgnoreCase("G")) {
heapMegabytes *= 1024;
}
}
}
if (MRJobConfig.QUEUE_NAME.equals(attr)) {
queue = value;
}
if (MRJobConfig.JOB_NAME.equals(attr)) {
jobName = value;
}
clusterMapMB =
maybeGetIntValue(MRConfig.MAPMEMORY_MB, attr, value, clusterMapMB);
clusterReduceMB =
maybeGetIntValue(MRConfig.REDUCEMEMORY_MB, attr, value,
clusterReduceMB);
jobMapMB =
maybeGetIntValue(MRJobConfig.MAP_MEMORY_MB, attr, value, jobMapMB);
jobReduceMB =
maybeGetIntValue(MRJobConfig.REDUCE_MEMORY_MB, attr, value,
jobReduceMB);
}
valid = true;
} catch (ParserConfigurationException e) {
valid = false;
} catch (SAXException e) {
valid = false;
} catch (IOException e) {
valid = false;
}
this.heapMegabytes = heapMegabytes;
this.queue = queue;
this.jobName = jobName;
this.clusterMapMB = clusterMapMB;
this.clusterReduceMB = clusterReduceMB;
this.jobMapMB = jobMapMB;
this.jobReduceMB = jobReduceMB;
this.jobID = jobID;
this.valid = valid;
}
}
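// --- Illustrative sketch (not part of the original file) ---
// Applies the same "-Xmx<size><m|g>" pattern used above, in isolation, to
// show how the heap figure is normalized to megabytes (a "g"/"G" suffix is
// multiplied by 1024). The sample option strings are made up.
class HeapOptionParsingSketch {
  private static final Pattern HEAP = Pattern.compile("-Xmx([0-9]+)([mMgG])");
  static int heapMegabytes(String javaOpts) {
    Matcher matcher = HEAP.matcher(javaOpts);
    if (!matcher.find()) {
      return -1;                         // same "unknown" sentinel as above
    }
    int megabytes = Integer.parseInt(matcher.group(1));
    if (matcher.group(2).equalsIgnoreCase("G")) {
      megabytes *= 1024;
    }
    return megabytes;
  }
  // heapMegabytes("-server -Xmx200m") == 200
  // heapMegabytes("-Xmx2g") == 2048
}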
| 6,151 | 27.747664 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/RandomSeedGenerator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.nio.charset.Charset;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* The purpose of this class is to generate new random seeds from a master
* seed. This is needed to make the Random().next*() calls in rumen and mumak
* deterministic so that mumak simulations become deterministically replayable.
*
* In these tools we need many independent streams of random numbers, some of
* which are created dynamically. We seed these streams with the sub-seeds
* returned by RandomSeedGenerator.
*
* For a slightly more complicated approach to generating multiple streams of
* random numbers with better theoretical guarantees, see
* P. L'Ecuyer, R. Simard, E. J. Chen, and W. D. Kelton,
 * ``An Object-Oriented Random-Number Package with Many Long Streams and
* Substreams'', Operations Research, 50, 6 (2002), 1073--1075
* http://www.iro.umontreal.ca/~lecuyer/papers.html
* http://www.iro.umontreal.ca/~lecuyer/myftp/streams00/
*/
public class RandomSeedGenerator {
private static Log LOG = LogFactory.getLog(RandomSeedGenerator.class);
private static final Charset UTF_8 = Charset.forName("UTF-8");
/** MD5 algorithm instance, one for each thread. */
private static final ThreadLocal<MessageDigest> md5Holder =
new ThreadLocal<MessageDigest>() {
@Override protected MessageDigest initialValue() {
MessageDigest md5 = null;
try {
md5 = MessageDigest.getInstance("MD5");
} catch (NoSuchAlgorithmException nsae) {
throw new RuntimeException("Can't create MD5 digests", nsae);
}
return md5;
}
};
/**
* Generates a new random seed.
*
* @param streamId a string identifying the stream of random numbers
* @param masterSeed higher level master random seed
* @return the random seed. Different (streamId, masterSeed) pairs result in
* (vastly) different random seeds.
*/
public static long getSeed(String streamId, long masterSeed) {
MessageDigest md5 = md5Holder.get();
md5.reset();
//'/' : make sure that we don't get the same str from ('11',0) and ('1',10)
// We could have fed the bytes of masterSeed one by one to md5.update()
// instead
String str = streamId + '/' + masterSeed;
byte[] digest = md5.digest(str.getBytes(UTF_8));
// Create a long from the first 8 bytes of the digest
// This is fine as MD5 has the avalanche property.
// Paranoids could have XOR folded the other 8 bytes in too.
long seed = 0;
for (int i=0; i<8; i++) {
seed = (seed<<8) + ((int)digest[i]+128);
}
return seed;
}
}
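// --- Illustrative usage sketch (not part of the original file) ---
// Each logically independent stream of random numbers gets its own
// deterministic sub-seed derived from one master seed, so replaying a
// simulation with the same master seed reproduces every stream. The stream
// identifiers below are made up.
class RandomSeedGeneratorUsageSketch {
  static java.util.Random streamFor(String streamId, long masterSeed) {
    long seed = RandomSeedGenerator.getSeed(streamId, masterSeed);
    return new java.util.Random(seed);
  }
  // streamFor("map-runtimes", 42L) and streamFor("reduce-runtimes", 42L)
  // yield unrelated but replayable sequences for the same master seed.
}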
| 3,616 | 40.102273 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobHistoryParser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
/**
* {@link JobHistoryParser} defines the interface of a Job History file parser.
*/
public interface JobHistoryParser extends Closeable {
/**
* Get the next {@link HistoryEvent}
 * @return the next {@link HistoryEvent}. If no more events are left, returns null.
* @throws IOException
*/
HistoryEvent nextEvent() throws IOException;
}
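// --- Illustrative consumption sketch (not part of the original file) ---
// 'parser' stands for any concrete JobHistoryParser; events are pulled until
// nextEvent() returns null and the parser is then closed.
class JobHistoryParserUsageSketch {
  static int countEvents(JobHistoryParser parser) throws IOException {
    int events = 0;
    try {
      while (parser.nextEvent() != null) {
        ++events;
      }
    } finally {
      parser.close();
    }
    return events;
  }
}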
| 1,315 | 34.567568 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Histogram.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package org.apache.hadoop.tools.rumen;
import java.io.PrintStream;
import java.util.Iterator;
import java.util.Map;
import java.util.TreeMap;
/**
 * {@link Histogram} represents an ordered summary of a sequence of {@code long}s
 * which can be queried to produce a discrete approximation of its cumulative
 * distribution function.
*
*/
class Histogram implements Iterable<Map.Entry<Long, Long>> {
private TreeMap<Long, Long> content = new TreeMap<Long, Long>();
private String name;
private long totalCount;
public Histogram() {
this("(anonymous)");
}
public Histogram(String name) {
super();
this.name = name;
totalCount = 0L;
}
public void dump(PrintStream stream) {
stream.print("dumping Histogram " + name + ":\n");
Iterator<Map.Entry<Long, Long>> iter = iterator();
while (iter.hasNext()) {
Map.Entry<Long, Long> ent = iter.next();
stream.print("val/count pair: " + (long) ent.getKey() + ", "
+ (long) ent.getValue() + "\n");
}
stream.print("*** end *** \n");
}
public Iterator<Map.Entry<Long, Long>> iterator() {
return content.entrySet().iterator();
}
public long get(long key) {
Long result = content.get(key);
return result == null ? 0 : result;
}
public long getTotalCount() {
return totalCount;
}
public void enter(long value) {
Long existingValue = content.get(value);
if (existingValue == null) {
content.put(value, 1L);
} else {
content.put(value, existingValue + 1L);
}
++totalCount;
}
/**
* Produces a discrete approximation of the CDF. The user provides the points
 * on the {@code Y} axis they want, and we give the corresponding points on the
* {@code X} axis, plus the minimum and maximum from the data.
*
* @param scale
* the denominator applied to every element of buckets. For example,
* if {@code scale} is {@code 1000}, a {@code buckets} element of 500
* will specify the median in that output slot.
* @param buckets
* an array of int, all less than scale and each strictly greater
* than its predecessor if any. We don't check these requirements.
* @return a {@code long[]}, with two more elements than {@code buckets} has.
* The first resp. last element is the minimum resp. maximum value
* that was ever {@code enter}ed. The rest of the elements correspond
* to the elements of {@code buckets} and carry the first element
 *         whose rank is no less than {@code #content elements * bucket /
 *         scale}.
*
*/
public long[] getCDF(int scale, int[] buckets) {
if (totalCount == 0) {
return null;
}
long[] result = new long[buckets.length + 2];
// fill in the min and the max
result[0] = content.firstEntry().getKey();
result[buckets.length + 1] = content.lastEntry().getKey();
Iterator<Map.Entry<Long, Long>> iter = content.entrySet().iterator();
long cumulativeCount = 0;
int bucketCursor = 0;
// Loop invariant: the item at buckets[bucketCursor] can still be reached
// from iter, and the number of logged elements no longer available from
// iter is cumulativeCount.
//
// cumulativeCount/totalCount is therefore strictly less than
// buckets[bucketCursor]/scale .
while (iter.hasNext()) {
long targetCumulativeCount = buckets[bucketCursor] * totalCount / scale;
Map.Entry<Long, Long> elt = iter.next();
cumulativeCount += elt.getValue();
while (cumulativeCount >= targetCumulativeCount) {
result[bucketCursor + 1] = elt.getKey();
++bucketCursor;
if (bucketCursor < buckets.length) {
targetCumulativeCount = buckets[bucketCursor] * totalCount / scale;
} else {
break;
}
}
if (bucketCursor == buckets.length) {
break;
}
}
return result;
}
}
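// --- Illustrative usage sketch (not part of the original file) ---
// Feeds a few readings in and asks for the quartiles on a scale of 1000,
// matching the getCDF() contract documented above. The values are made up.
class HistogramUsageSketch {
  static long[] quartiles() {
    Histogram h = new Histogram("demo");
    for (long v : new long[] { 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 }) {
      h.enter(v);
    }
    // result[0] is the minimum, result[4] the maximum; the middle three
    // entries are the 25th, 50th and 75th percentile readings.
    return h.getCDF(1000, new int[] { 250, 500, 750 });
  }
}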
| 4,806 | 28.133333 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/MapTaskAttemptInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.List;
import org.apache.hadoop.mapred.TaskStatus.State;
/**
* {@link MapTaskAttemptInfo} represents the information with regard to a
* map task attempt.
*/
public class MapTaskAttemptInfo extends TaskAttemptInfo {
private long runtime;
public MapTaskAttemptInfo(State state, TaskInfo taskInfo,
long runtime, List<List<Integer>> allSplits) {
super(state, taskInfo,
allSplits == null
? LoggedTaskAttempt.SplitVectorKind.getNullSplitsVector()
: allSplits);
this.runtime = runtime;
}
/**
*
* @deprecated please use the constructor with
* {@code (state, taskInfo, runtime,
* List<List<Integer>> allSplits)}
* instead.
*
* see {@link LoggedTaskAttempt} for an explanation of
* {@code allSplits}.
*
* If there are no known splits, use {@code null}.
*/
@Deprecated
public MapTaskAttemptInfo(State state, TaskInfo taskInfo,
long runtime) {
this(state, taskInfo, runtime, null);
}
@Override
public long getRuntime() {
return getMapRuntime();
}
/**
* Get the runtime for the <b>map</b> phase of the map-task attempt.
*
* @return the runtime for the <b>map</b> phase of the map-task attempt
*/
public long getMapRuntime() {
return runtime;
}
}
| 2,240 | 29.69863 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Task20LineHistoryEventEmitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.text.ParseException;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskFailedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskFinishedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskStartedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskUpdatedEvent;
public class Task20LineHistoryEventEmitter extends HistoryEventEmitter {
static List<SingleEventEmitter> nonFinals =
new LinkedList<SingleEventEmitter>();
static List<SingleEventEmitter> finals = new LinkedList<SingleEventEmitter>();
Long originalStartTime = null;
TaskType originalTaskType = null;
static {
nonFinals.add(new TaskStartedEventEmitter());
nonFinals.add(new TaskUpdatedEventEmitter());
finals.add(new TaskFinishedEventEmitter());
finals.add(new TaskFailedEventEmitter());
}
protected Task20LineHistoryEventEmitter() {
super();
}
static private class TaskStartedEventEmitter extends SingleEventEmitter {
HistoryEvent maybeEmitEvent(ParsedLine line, String taskIDName,
HistoryEventEmitter thatg) {
if (taskIDName == null) {
return null;
}
TaskID taskID = TaskID.forName(taskIDName);
String taskType = line.get("TASK_TYPE");
String startTime = line.get("START_TIME");
String splits = line.get("SPLITS");
if (startTime != null && taskType != null) {
Task20LineHistoryEventEmitter that =
(Task20LineHistoryEventEmitter) thatg;
that.originalStartTime = Long.parseLong(startTime);
that.originalTaskType =
Version20LogInterfaceUtils.get20TaskType(taskType);
return new TaskStartedEvent(taskID, that.originalStartTime,
that.originalTaskType, splits);
}
return null;
}
}
static private class TaskUpdatedEventEmitter extends SingleEventEmitter {
HistoryEvent maybeEmitEvent(ParsedLine line, String taskIDName,
HistoryEventEmitter thatg) {
if (taskIDName == null) {
return null;
}
TaskID taskID = TaskID.forName(taskIDName);
String finishTime = line.get("FINISH_TIME");
if (finishTime != null) {
return new TaskUpdatedEvent(taskID, Long.parseLong(finishTime));
}
return null;
}
}
static private class TaskFinishedEventEmitter extends SingleEventEmitter {
HistoryEvent maybeEmitEvent(ParsedLine line, String taskIDName,
HistoryEventEmitter thatg) {
if (taskIDName == null) {
return null;
}
TaskID taskID = TaskID.forName(taskIDName);
String status = line.get("TASK_STATUS");
String finishTime = line.get("FINISH_TIME");
String error = line.get("ERROR");
String counters = line.get("COUNTERS");
if (finishTime != null && error == null
&& (status != null && status.equalsIgnoreCase("success"))) {
Counters eventCounters = maybeParseCounters(counters);
Task20LineHistoryEventEmitter that =
(Task20LineHistoryEventEmitter) thatg;
if (that.originalTaskType == null) {
return null;
}
return new TaskFinishedEvent(taskID, null, Long.parseLong(finishTime),
that.originalTaskType, status, eventCounters);
}
return null;
}
}
static private class TaskFailedEventEmitter extends SingleEventEmitter {
HistoryEvent maybeEmitEvent(ParsedLine line, String taskIDName,
HistoryEventEmitter thatg) {
if (taskIDName == null) {
return null;
}
TaskID taskID = TaskID.forName(taskIDName);
String status = line.get("TASK_STATUS");
String finishTime = line.get("FINISH_TIME");
String taskType = line.get("TASK_TYPE");
String error = line.get("ERROR");
if (finishTime != null
&& (error != null || (status != null && !status
.equalsIgnoreCase("success")))) {
Task20LineHistoryEventEmitter that =
(Task20LineHistoryEventEmitter) thatg;
TaskType originalTaskType =
that.originalTaskType == null ? Version20LogInterfaceUtils
.get20TaskType(taskType) : that.originalTaskType;
return new TaskFailedEvent(taskID, Long.parseLong(finishTime),
originalTaskType, error, status, null);
}
return null;
}
}
@Override
List<SingleEventEmitter> finalSEEs() {
return finals;
}
@Override
List<SingleEventEmitter> nonFinalSEEs() {
return nonFinals;
}
}
| 5,627 | 29.754098 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/HadoopLogsAnalyzer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package org.apache.hadoop.tools.rumen;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.EOFException;
import java.io.PrintStream;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.Decompressor;
import org.codehaus.jackson.JsonProcessingException;
/**
* This is the main class for rumen log mining functionality.
*
* It reads a directory of job tracker logs, and computes various information
* about it. See {@code usage()}, below.
*
*/
@Deprecated
public class HadoopLogsAnalyzer extends Configured implements Tool {
// output streams
private PrintStream statusOutput = System.out;
private PrintStream statisticalOutput = System.out;
private static PrintStream staticDebugOutput = System.err;
/**
* The number of splits a task can have, before we ignore them all.
*/
private final static int MAXIMUM_PREFERRED_LOCATIONS = 25;
/**
* This element is to compensate for the fact that our percentiles engine
* rounds up for the expected sample count, so if the total number of readings
* is small enough we need to compensate slightly when aggregating the spread
* data from jobs with few reducers together with jobs with many reducers.
*/
private static final long SMALL_SPREAD_COMPENSATION_THRESHOLD = 5L;
/**
* {@code MAXIMUM_CLOCK_SKEW} is the maximum plausible difference between the
* clocks of machines in the same cluster. This is important because an event
* that logically must follow a second event will be considered non-anomalous
* if it precedes that second event, provided they happen on different
* machines.
*/
private static final long MAXIMUM_CLOCK_SKEW = 10000L;
/**
* The regular expression used to parse task attempt IDs in job tracker logs
*/
private final static Pattern taskAttemptIDPattern =
Pattern.compile(".*_([0-9]+)");
private final static Pattern xmlFilePrefix = Pattern.compile("[ \t]*<");
private final static Pattern confFileHeader = Pattern.compile("_conf.xml!!");
private final Map<String, Pattern> counterPatterns =
new HashMap<String, Pattern>();
/**
* The unpaired job config file. Currently only used to glean the {@code -Xmx}
* field of the JRE options
*/
private ParsedConfigFile jobconf = null;
/**
* Set by {@code -omit-task-details}. If true, we <i>only</i> emit the job
* digest [statistical info], not the detailed job trace.
*/
private boolean omitTaskDetails = false;
private Outputter<LoggedJob> jobTraceGen = null;
private boolean prettyprintTrace = true;
private LoggedJob jobBeingTraced = null;
private Map<String, LoggedTask> tasksInCurrentJob;
private Map<String, LoggedTaskAttempt> attemptsInCurrentJob;
private Histogram[] successfulMapAttemptTimes;
private Histogram successfulReduceAttemptTimes;
private Histogram[] failedMapAttemptTimes;
private Histogram failedReduceAttemptTimes;
private Histogram successfulNthMapperAttempts;
private Histogram successfulNthReducerAttempts;
private Histogram mapperLocality;
static final private Log LOG = LogFactory.getLog(HadoopLogsAnalyzer.class);
private int[] attemptTimesPercentiles;
private Outputter<LoggedNetworkTopology> topologyGen = null;
private HashSet<ParsedHost> allHosts = new HashSet<ParsedHost>();
// number of ticks per second
private boolean collecting = false;
private long lineNumber = 0;
private String rereadableLine = null;
private String inputFilename;
private boolean inputIsDirectory = false;
private Path inputDirectoryPath = null;
private String[] inputDirectoryFiles = null;
private int inputDirectoryCursor = -1;
private LineReader input = null;
private CompressionCodec inputCodec = null;
private Decompressor inputDecompressor = null;
private Text inputLineText = new Text();
private boolean debug = false;
private int version = 0;
private int numberBuckets = 99;
private int spreadMin;
private int spreadMax;
private boolean spreading = false;
private boolean delays = false;
private boolean runtimes = false;
private boolean collectTaskTimes = false;
private LogRecordType canonicalJob = LogRecordType.intern("Job");
private LogRecordType canonicalMapAttempt =
LogRecordType.intern("MapAttempt");
private LogRecordType canonicalReduceAttempt =
LogRecordType.intern("ReduceAttempt");
private LogRecordType canonicalTask = LogRecordType.intern("Task");
private static Pattern streamingJobnamePattern =
Pattern.compile("streamjob\\d+.jar");
private HashSet<String> hostNames = new HashSet<String>();
private boolean fileFirstLine = true;
private String currentFileName = null;
// Here are the cumulative statistics.
enum JobOutcome {
SUCCESS, FAILURE, OVERALL
};
/**
* These rectangular arrays of {@link Histogram}s are indexed by the job type
* [java, streaming, pig or pipes] and then by the outcome [success or
* failure]
*/
private Histogram runTimeDists[][];
private Histogram delayTimeDists[][];
private Histogram mapTimeSpreadDists[][];
private Histogram shuffleTimeSpreadDists[][];
private Histogram sortTimeSpreadDists[][];
private Histogram reduceTimeSpreadDists[][];
private Histogram mapTimeDists[][];
private Histogram shuffleTimeDists[][];
private Histogram sortTimeDists[][];
private Histogram reduceTimeDists[][];
private Map<String, Long> taskAttemptStartTimes;
private Map<String, Long> taskReduceAttemptShuffleEndTimes;
private Map<String, Long> taskReduceAttemptSortEndTimes;
private Map<String, Long> taskMapAttemptFinishTimes;
private Map<String, Long> taskReduceAttemptFinishTimes;
private long submitTimeCurrentJob;
private long launchTimeCurrentJob;
private String currentJobID;
// TODO this is currently not being set correctly. We should fix it.
// That only matters for statistics extraction.
private LoggedJob.JobType thisJobType;
private Histogram[][] newDistributionBlock() {
return newDistributionBlock(null);
}
private Histogram[][] newDistributionBlock(String blockname) {
Histogram[][] result = new Histogram[JobOutcome.values().length][];
for (int i = 0; i < JobOutcome.values().length; ++i) {
result[i] = new Histogram[LoggedJob.JobType.values().length];
for (int j = 0; j < LoggedJob.JobType.values().length; ++j) {
result[i][j] =
blockname == null ? new Histogram() : new Histogram(blockname);
}
}
return result;
}
private Histogram getDistribution(Histogram[][] block, JobOutcome outcome,
LoggedJob.JobType type) {
return block[outcome.ordinal()][type.ordinal()];
}
private void usage() {
statusOutput
.print("Usage: \n"
+ "administrative subcommands:\n"
+ "-v1 specify version 1 of the jt logs\n"
+ "-h or -help print this message\n"
+ "-d or -debug print voluminous debug info during processing\n"
+ "-collect-prefixes collect the prefixes of log lines\n\n"
+ " job trace subcommands\n"
+ "-write-job-trace takes a filename.\n"
+ " writes job trace in JSON to that filename\n"
+ "-single-line-job-traces omit prettyprinting of job trace\n"
+ "-omit-task-details leave out info about each task and attempt,\n"
+ " so only statistical info is added to each job\n"
+ "-write-topology takes a filename.\n"
+ " writes JSON file giving network topology\n"
+ "-job-digest-spectra takes a list of percentile points\n"
+ " writes CDFs with min, max, and those percentiles\n\n"
+ "subcommands for task statistical info\n"
+ "-spreads we have a mode where, for each job, we can\n"
+ " develop the ratio of percentile B to percentile A\n"
+ " of task run times. Having developed that ratio,\n"
+ " we can consider it to be a datum and we can\n"
+ " build a CDF of those ratios. -spreads turns\n"
+ " this option on, and takes A and B\n"
+ "-delays tells us to gather and print CDFs for delays\n"
+ " from job submit to job start\n"
+ "-runtimes prints CDFs of job wallclock times [launch\n"
+ " to finish]\n"
+ "-tasktimes prints CDFs of job wallclock times [launch\n"
+ " to finish]\n\n");
}
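  // --- Illustrative invocation sketch (not part of the original file) ---
  // The class implements Tool, so it is normally driven through ToolRunner;
  // the flags below mirror usage() above, and the file names are made up:
  //
  //   int exitCode = ToolRunner.run(new HadoopLogsAnalyzer(), new String[] {
  //       "-v1", "-write-job-trace", "trace.json",
  //       "-write-topology", "topology.json", "jt-history-dir" });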
public HadoopLogsAnalyzer() {
super();
}
private boolean pathIsDirectory(Path p) throws IOException {
FileSystem fs = p.getFileSystem(getConf());
return fs.getFileStatus(p).isDirectory();
}
/**
* @param args
* string arguments. See {@code usage()}
* @throws FileNotFoundException
* @throws IOException
*/
private int initializeHadoopLogsAnalyzer(String[] args)
throws FileNotFoundException, IOException {
Path jobTraceFilename = null;
Path topologyFilename = null;
if (args.length == 0 || args[args.length - 1].charAt(0) == '-') {
throw new IllegalArgumentException("No input specified.");
} else {
inputFilename = args[args.length - 1];
}
for (int i = 0; i < args.length - (inputFilename == null ? 0 : 1); ++i) {
if (StringUtils.equalsIgnoreCase("-h", args[i])
|| StringUtils.equalsIgnoreCase("-help", args[i])) {
usage();
return 0;
}
if (StringUtils.equalsIgnoreCase("-c", args[i])
|| StringUtils.equalsIgnoreCase("-collect-prefixes", args[i])) {
collecting = true;
continue;
}
// these control the job digest
if (StringUtils.equalsIgnoreCase("-write-job-trace", args[i])) {
++i;
jobTraceFilename = new Path(args[i]);
continue;
}
if (StringUtils.equalsIgnoreCase("-single-line-job-traces", args[i])) {
prettyprintTrace = false;
continue;
}
if (StringUtils.equalsIgnoreCase("-omit-task-details", args[i])) {
omitTaskDetails = true;
continue;
}
if (StringUtils.equalsIgnoreCase("-write-topology", args[i])) {
++i;
topologyFilename = new Path(args[i]);
continue;
}
if (StringUtils.equalsIgnoreCase("-job-digest-spectra", args[i])) {
ArrayList<Integer> values = new ArrayList<Integer>();
++i;
while (i < args.length && Character.isDigit(args[i].charAt(0))) {
values.add(Integer.parseInt(args[i]));
++i;
}
if (values.size() == 0) {
throw new IllegalArgumentException("Empty -job-digest-spectra list");
}
attemptTimesPercentiles = new int[values.size()];
int lastValue = 0;
for (int j = 0; j < attemptTimesPercentiles.length; ++j) {
if (values.get(j) <= lastValue || values.get(j) >= 100) {
throw new IllegalArgumentException(
"Bad -job-digest-spectra percentiles list");
}
attemptTimesPercentiles[j] = values.get(j);
}
--i;
continue;
}
if (StringUtils.equalsIgnoreCase("-d", args[i])
|| StringUtils.equalsIgnoreCase("-debug", args[i])) {
debug = true;
continue;
}
if (StringUtils.equalsIgnoreCase("-spreads", args[i])) {
int min = Integer.parseInt(args[i + 1]);
int max = Integer.parseInt(args[i + 2]);
if (min < max && min < 1000 && max < 1000) {
spreadMin = min;
spreadMax = max;
spreading = true;
i += 2;
}
continue;
}
// These control log-wide CDF outputs
if (StringUtils.equalsIgnoreCase("-delays", args[i])) {
delays = true;
continue;
}
if (StringUtils.equalsIgnoreCase("-runtimes", args[i])) {
runtimes = true;
continue;
}
if (StringUtils.equalsIgnoreCase("-tasktimes", args[i])) {
collectTaskTimes = true;
continue;
}
if (StringUtils.equalsIgnoreCase("-v1", args[i])) {
version = 1;
continue;
}
throw new IllegalArgumentException("Unrecognized argument: " + args[i]);
}
runTimeDists = newDistributionBlock();
delayTimeDists = newDistributionBlock();
mapTimeSpreadDists = newDistributionBlock("map-time-spreads");
shuffleTimeSpreadDists = newDistributionBlock();
sortTimeSpreadDists = newDistributionBlock();
reduceTimeSpreadDists = newDistributionBlock();
mapTimeDists = newDistributionBlock();
shuffleTimeDists = newDistributionBlock();
sortTimeDists = newDistributionBlock();
reduceTimeDists = newDistributionBlock();
taskAttemptStartTimes = new HashMap<String, Long>();
taskReduceAttemptShuffleEndTimes = new HashMap<String, Long>();
taskReduceAttemptSortEndTimes = new HashMap<String, Long>();
taskMapAttemptFinishTimes = new HashMap<String, Long>();
taskReduceAttemptFinishTimes = new HashMap<String, Long>();
final Path inputPath = new Path(inputFilename);
inputIsDirectory = pathIsDirectory(inputPath);
if (jobTraceFilename != null && attemptTimesPercentiles == null) {
attemptTimesPercentiles = new int[19];
for (int i = 0; i < 19; ++i) {
attemptTimesPercentiles[i] = (i + 1) * 5;
}
}
if (!inputIsDirectory) {
input = maybeUncompressedPath(inputPath);
} else {
inputDirectoryPath = inputPath;
FileSystem fs = inputPath.getFileSystem(getConf());
FileStatus[] statuses = fs.listStatus(inputPath);
inputDirectoryFiles = new String[statuses.length];
for (int i = 0; i < statuses.length; ++i) {
inputDirectoryFiles[i] = statuses[i].getPath().getName();
}
// filter out the .crc files, if any
int dropPoint = 0;
for (int i = 0; i < inputDirectoryFiles.length; ++i) {
String name = inputDirectoryFiles[i];
if (!(name.length() >= 4 && ".crc".equals(name
.substring(name.length() - 4)))) {
inputDirectoryFiles[dropPoint++] = name;
}
}
LOG.info("We dropped " + (inputDirectoryFiles.length - dropPoint)
+ " crc files.");
String[] new_inputDirectoryFiles = new String[dropPoint];
System.arraycopy(inputDirectoryFiles, 0, new_inputDirectoryFiles, 0,
dropPoint);
inputDirectoryFiles = new_inputDirectoryFiles;
Arrays.sort(inputDirectoryFiles);
if (!setNextDirectoryInputStream()) {
throw new FileNotFoundException("Empty directory specified.");
}
}
if (jobTraceFilename != null) {
jobTraceGen = new DefaultOutputter<LoggedJob>();
jobTraceGen.init(jobTraceFilename, getConf());
if (topologyFilename != null) {
topologyGen = new DefaultOutputter<LoggedNetworkTopology>();
topologyGen.init(topologyFilename, getConf());
}
}
return 0;
}
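  /**
   * Open the given path for line-by-line reading. If the
   * CompressionCodecFactory recognizes the file name (for example, a .gz
   * suffix), the stream is wrapped with a pooled decompressor, and the codec
   * and decompressor are remembered so they can be returned to the pool later.
   */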
private LineReader maybeUncompressedPath(Path p)
throws FileNotFoundException, IOException {
CompressionCodecFactory codecs = new CompressionCodecFactory(getConf());
inputCodec = codecs.getCodec(p);
FileSystem fs = p.getFileSystem(getConf());
FSDataInputStream fileIn = fs.open(p);
if (inputCodec == null) {
return new LineReader(fileIn, getConf());
} else {
inputDecompressor = CodecPool.getDecompressor(inputCodec);
return new LineReader(inputCodec.createInputStream(fileIn,
inputDecompressor), getConf());
}
}
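  /**
   * Advance to the next file of the input directory: close the current
   * reader, return any pooled decompressor, and open the next file of the
   * (already sorted) directory listing.
   *
   * @return true if another file was opened, false when the directory is
   *         exhausted.
   */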
private boolean setNextDirectoryInputStream() throws FileNotFoundException,
IOException {
if (input != null) {
input.close();
LOG.info("File closed: " + currentFileName);
input = null;
}
if (inputCodec != null) {
CodecPool.returnDecompressor(inputDecompressor);
inputDecompressor = null;
inputCodec = null;
}
++inputDirectoryCursor;
if (inputDirectoryCursor >= inputDirectoryFiles.length) {
return false;
}
fileFirstLine = true;
currentFileName = inputDirectoryFiles[inputDirectoryCursor];
LOG.info("\nOpening file " + currentFileName
+ " *************************** .");
    LOG.info("This file, " + (inputDirectoryCursor + 1) + "/"
        + inputDirectoryFiles.length + ", starts with line " + lineNumber
        + ".");
input =
maybeUncompressedPath(new Path(inputDirectoryPath, currentFileName));
return true;
}
private String readInputLine() throws IOException {
try {
if (input == null) {
return null;
}
inputLineText.clear();
if (input.readLine(inputLineText) == 0) {
return null;
}
return inputLineText.toString();
} catch (EOFException e) {
return null;
}
}
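  /**
   * Read the next line while maintaining the line counter, with one line of
   * push-back (see unreadCountedLine). When a new file starts, a synthetic
   * "\f!!FILE <name>!!" marker line is returned first; at end-of-file the
   * reader transparently chains to the next file of the input directory.
   */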
private String readCountedLine() throws IOException {
if (rereadableLine != null) {
String result = rereadableLine;
rereadableLine = null;
return result;
}
String result = readInputLine();
if (result != null) {
if (fileFirstLine && (result.equals("") || result.charAt(0) != '\f')) {
fileFirstLine = false;
rereadableLine = result;
return "\f!!FILE " + currentFileName + "!!\n";
}
fileFirstLine = false;
++lineNumber;
} else if (inputIsDirectory && setNextDirectoryInputStream()) {
result = readCountedLine();
}
return result;
}
private void unreadCountedLine(String unreadee) {
if (rereadableLine == null) {
rereadableLine = unreadee;
}
}
private boolean apparentConfFileHeader(String header) {
return confFileHeader.matcher(header).find();
}
private boolean apparentXMLFileStart(String line) {
return xmlFilePrefix.matcher(line).lookingAt();
}
// This can return either the Pair of the !!file line and the XMLconf
// file, or null and an ordinary line. Returns just null if there's
// no more input.
private Pair<String, String> readBalancedLine() throws IOException {
String line = readCountedLine();
if (line == null) {
return null;
}
while (line.indexOf('\f') > 0) {
line = line.substring(line.indexOf('\f'));
}
if (line.length() != 0 && line.charAt(0) == '\f') {
String subjectLine = readCountedLine();
if (subjectLine != null && subjectLine.length() != 0
&& apparentConfFileHeader(line) && apparentXMLFileStart(subjectLine)) {
StringBuilder sb = new StringBuilder();
while (subjectLine != null && subjectLine.indexOf('\f') > 0) {
subjectLine = subjectLine.substring(subjectLine.indexOf('\f'));
}
while (subjectLine != null
&& (subjectLine.length() == 0 || subjectLine.charAt(0) != '\f')) {
sb.append(subjectLine);
subjectLine = readCountedLine();
}
if (subjectLine != null) {
unreadCountedLine(subjectLine);
}
return new Pair<String, String>(line, sb.toString());
}
// here we had a file line, but it introduced a log segment, not
// a conf file. We want to just ignore the file line.
return readBalancedLine();
}
String endlineString = (version == 0 ? " " : " .");
if (line.length() < endlineString.length()) {
return new Pair<String, String>(null, line);
}
if (!endlineString.equals(line.substring(line.length()
- endlineString.length()))) {
StringBuilder sb = new StringBuilder(line);
String addedLine;
do {
addedLine = readCountedLine();
if (addedLine == null) {
return new Pair<String, String>(null, sb.toString());
}
while (addedLine.indexOf('\f') > 0) {
addedLine = addedLine.substring(addedLine.indexOf('\f'));
}
if (addedLine.length() > 0 && addedLine.charAt(0) == '\f') {
unreadCountedLine(addedLine);
return new Pair<String, String>(null, sb.toString());
}
sb.append("\n");
sb.append(addedLine);
} while (!endlineString.equals(addedLine.substring(addedLine.length()
- endlineString.length())));
line = sb.toString();
}
return new Pair<String, String>(null, line);
}
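  /**
   * Record the per-job spread of successful task times: the ratio of the
   * (spreadMax/1000) quantile to the (spreadMin/1000) quantile of the
   * supplied histogram, scaled by 1,000,000, entered into the spread
   * histogram for this outcome and job type. For very small samples the
   * min..max range is used instead of the interior quantiles.
   */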
private void incorporateSpread(Histogram taskTimes, Histogram[][] spreadTo,
JobOutcome outcome, LoggedJob.JobType jtype) {
if (!spreading) {
return;
}
if (taskTimes.getTotalCount() <= 1) {
return;
}
// there are some literals here that probably should be options
int[] endpoints = new int[2];
endpoints[0] = spreadMin;
endpoints[1] = spreadMax;
long[] endpointKeys = taskTimes.getCDF(1000, endpoints);
int smallResultOffset =
(taskTimes.getTotalCount() < SMALL_SPREAD_COMPENSATION_THRESHOLD ? 1
: 0);
Histogram myTotal = spreadTo[outcome.ordinal()][jtype.ordinal()];
long dividend = endpointKeys[2 + smallResultOffset];
long divisor = endpointKeys[1 - smallResultOffset];
if (divisor > 0) {
long mytotalRatio = dividend * 1000000L / divisor;
myTotal.enter(mytotalRatio);
}
}
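  // Enter the value into the specific (outcome, job type) histogram and also
  // into the OVERALL outcome, OVERALL job type, and (OVERALL, OVERALL)
  // aggregates, so the summary spectra stay consistent with the detailed ones.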
private void canonicalDistributionsEnter(Histogram[][] block,
JobOutcome outcome, LoggedJob.JobType type, long value) {
getDistribution(block, outcome, type).enter(value);
getDistribution(block, JobOutcome.OVERALL, type).enter(value);
getDistribution(block, outcome, LoggedJob.JobType.OVERALL).enter(value);
getDistribution(block, JobOutcome.OVERALL, LoggedJob.JobType.OVERALL)
.enter(value);
}
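  /**
   * Handle a "Job" history line (versions 0 and 1). When a new job ID is seen
   * and a trace is being written, the previous LoggedJob is pushed out via
   * finalizeJob() and a fresh one is started along with its per-job
   * statistics. Job-level fields (user, priority, task totals, submit/launch/
   * finish times, outcome) are filled in as they appear, and on the finish
   * line the per-attempt timing tables are folded into the global delay,
   * runtime and phase distributions as well as the per-job spreads.
   */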
private void processJobLine(ParsedLine line) throws JsonProcessingException,
IOException {
try {
if (version == 0 || version == 1) {
// determine the job type if this is the declaration line
String jobID = line.get("JOBID");
String user = line.get("USER");
String jobPriority = line.get("JOB_PRIORITY");
String submitTime = line.get("SUBMIT_TIME");
String jobName = line.get("JOBNAME");
String launchTime = line.get("LAUNCH_TIME");
String finishTime = line.get("FINISH_TIME");
String status = line.get("JOB_STATUS");
String totalMaps = line.get("TOTAL_MAPS");
String totalReduces = line.get("TOTAL_REDUCES");
/*
* If the job appears new [the ID is different from the most recent one,
* if any] we make a new LoggedJob.
*/
if (jobID != null
&& jobTraceGen != null
&& (jobBeingTraced == null
|| !jobID.equals(jobBeingTraced.getJobID().toString()))) {
          // push out the old job if there is one, even though it didn't get
          // mated with a conf.
finalizeJob();
jobBeingTraced = new LoggedJob(jobID);
tasksInCurrentJob = new HashMap<String, LoggedTask>();
attemptsInCurrentJob = new HashMap<String, LoggedTaskAttempt>();
// initialize all the per-job statistics gathering places
successfulMapAttemptTimes =
new Histogram[ParsedHost.numberOfDistances() + 1];
for (int i = 0; i < successfulMapAttemptTimes.length; ++i) {
successfulMapAttemptTimes[i] = new Histogram();
}
successfulReduceAttemptTimes = new Histogram();
failedMapAttemptTimes =
new Histogram[ParsedHost.numberOfDistances() + 1];
for (int i = 0; i < failedMapAttemptTimes.length; ++i) {
failedMapAttemptTimes[i] = new Histogram();
}
failedReduceAttemptTimes = new Histogram();
successfulNthMapperAttempts = new Histogram();
successfulNthReducerAttempts = new Histogram();
mapperLocality = new Histogram();
}
// here we fill in all the stuff the trace might need
if (jobBeingTraced != null) {
if (user != null) {
jobBeingTraced.setUser(user);
}
if (jobPriority != null) {
jobBeingTraced.setPriority(LoggedJob.JobPriority
.valueOf(jobPriority));
}
if (totalMaps != null) {
jobBeingTraced.setTotalMaps(Integer.parseInt(totalMaps));
}
if (totalReduces != null) {
jobBeingTraced.setTotalReduces(Integer.parseInt(totalReduces));
}
if (submitTime != null) {
jobBeingTraced.setSubmitTime(Long.parseLong(submitTime));
}
if (launchTime != null) {
jobBeingTraced.setLaunchTime(Long.parseLong(launchTime));
}
if (finishTime != null) {
jobBeingTraced.setFinishTime(Long.parseLong(finishTime));
if (status != null) {
jobBeingTraced.setOutcome(Pre21JobHistoryConstants.Values
.valueOf(status));
}
maybeMateJobAndConf();
}
}
if (jobName != null) {
// we'll make it java unless the name parses out
Matcher m = streamingJobnamePattern.matcher(jobName);
thisJobType = LoggedJob.JobType.JAVA;
if (m.matches()) {
thisJobType = LoggedJob.JobType.STREAMING;
}
}
if (submitTime != null) {
submitTimeCurrentJob = Long.parseLong(submitTime);
currentJobID = jobID;
taskAttemptStartTimes = new HashMap<String, Long>();
taskReduceAttemptShuffleEndTimes = new HashMap<String, Long>();
taskReduceAttemptSortEndTimes = new HashMap<String, Long>();
taskMapAttemptFinishTimes = new HashMap<String, Long>();
taskReduceAttemptFinishTimes = new HashMap<String, Long>();
launchTimeCurrentJob = 0L;
} else if (launchTime != null && jobID != null
&& currentJobID.equals(jobID)) {
launchTimeCurrentJob = Long.parseLong(launchTime);
} else if (finishTime != null && jobID != null
&& currentJobID.equals(jobID)) {
long endTime = Long.parseLong(finishTime);
if (launchTimeCurrentJob != 0) {
String jobResultText = line.get("JOB_STATUS");
JobOutcome thisOutcome =
((jobResultText != null && "SUCCESS".equals(jobResultText))
? JobOutcome.SUCCESS : JobOutcome.FAILURE);
if (submitTimeCurrentJob != 0L) {
canonicalDistributionsEnter(delayTimeDists, thisOutcome,
thisJobType, launchTimeCurrentJob - submitTimeCurrentJob);
}
if (launchTimeCurrentJob != 0L) {
canonicalDistributionsEnter(runTimeDists, thisOutcome,
thisJobType, endTime - launchTimeCurrentJob);
}
// Now we process the hash tables with successful task attempts
Histogram currentJobMapTimes = new Histogram();
Histogram currentJobShuffleTimes = new Histogram();
Histogram currentJobSortTimes = new Histogram();
Histogram currentJobReduceTimes = new Histogram();
Iterator<Map.Entry<String, Long>> taskIter =
taskAttemptStartTimes.entrySet().iterator();
while (taskIter.hasNext()) {
Map.Entry<String, Long> entry = taskIter.next();
long startTime = entry.getValue();
// Map processing
Long mapEndTime = taskMapAttemptFinishTimes.get(entry.getKey());
if (mapEndTime != null) {
currentJobMapTimes.enter(mapEndTime - startTime);
canonicalDistributionsEnter(mapTimeDists, thisOutcome,
thisJobType, mapEndTime - startTime);
}
// Reduce processing
Long shuffleEnd =
taskReduceAttemptShuffleEndTimes.get(entry.getKey());
Long sortEnd = taskReduceAttemptSortEndTimes.get(entry.getKey());
Long reduceEnd = taskReduceAttemptFinishTimes.get(entry.getKey());
if (shuffleEnd != null && sortEnd != null && reduceEnd != null) {
currentJobShuffleTimes.enter(shuffleEnd - startTime);
currentJobSortTimes.enter(sortEnd - shuffleEnd);
currentJobReduceTimes.enter(reduceEnd - sortEnd);
canonicalDistributionsEnter(shuffleTimeDists, thisOutcome,
thisJobType, shuffleEnd - startTime);
canonicalDistributionsEnter(sortTimeDists, thisOutcome,
thisJobType, sortEnd - shuffleEnd);
canonicalDistributionsEnter(reduceTimeDists, thisOutcome,
thisJobType, reduceEnd - sortEnd);
}
}
// Here we save out the task information
incorporateSpread(currentJobMapTimes, mapTimeSpreadDists,
thisOutcome, thisJobType);
incorporateSpread(currentJobShuffleTimes, shuffleTimeSpreadDists,
thisOutcome, thisJobType);
incorporateSpread(currentJobSortTimes, sortTimeSpreadDists,
thisOutcome, thisJobType);
incorporateSpread(currentJobReduceTimes, reduceTimeSpreadDists,
thisOutcome, thisJobType);
}
}
}
} catch (NumberFormatException e) {
LOG.warn(
"HadoopLogsAnalyzer.processJobLine: bad numerical format, at line "
+ lineNumber + ".", e);
}
}
private void processTaskLine(ParsedLine line) {
if (jobBeingTraced != null) {
// these fields are in both the start and finish record
String taskID = line.get("TASKID");
String taskType = line.get("TASK_TYPE");
// this field is only in the start record
String startTime = line.get("START_TIME");
// these fields only exist or are only relevant in the finish record
String status = line.get("TASK_STATUS");
String finishTime = line.get("FINISH_TIME");
String splits = line.get("SPLITS");
LoggedTask task = tasksInCurrentJob.get(taskID);
boolean taskAlreadyLogged = task != null;
if (task == null) {
task = new LoggedTask();
}
if (splits != null) {
ArrayList<LoggedLocation> locations = null;
StringTokenizer tok = new StringTokenizer(splits, ",", false);
if (tok.countTokens() <= MAXIMUM_PREFERRED_LOCATIONS) {
locations = new ArrayList<LoggedLocation>();
}
while (tok.hasMoreTokens()) {
String nextSplit = tok.nextToken();
ParsedHost node = getAndRecordParsedHost(nextSplit);
if (locations != null && node != null) {
locations.add(node.makeLoggedLocation());
}
}
task.setPreferredLocations(locations);
}
task.setTaskID(taskID);
if (startTime != null) {
task.setStartTime(Long.parseLong(startTime));
}
if (finishTime != null) {
task.setFinishTime(Long.parseLong(finishTime));
}
Pre21JobHistoryConstants.Values typ;
Pre21JobHistoryConstants.Values stat;
try {
stat =
status == null ? null : Pre21JobHistoryConstants.Values
.valueOf(status);
} catch (IllegalArgumentException e) {
LOG.error("A task status you don't know about is \"" + status + "\".",
e);
stat = null;
}
task.setTaskStatus(stat);
try {
typ =
taskType == null ? null : Pre21JobHistoryConstants.Values
.valueOf(taskType);
} catch (IllegalArgumentException e) {
LOG.error("A task type you don't know about is \"" + taskType + "\".",
e);
typ = null;
}
if (typ == null) {
return;
}
task.setTaskType(typ);
List<LoggedTask> vec =
typ == Pre21JobHistoryConstants.Values.MAP ? jobBeingTraced
.getMapTasks() : typ == Pre21JobHistoryConstants.Values.REDUCE
? jobBeingTraced.getReduceTasks() : jobBeingTraced
.getOtherTasks();
if (!taskAlreadyLogged) {
vec.add(task);
tasksInCurrentJob.put(taskID, task);
}
}
}
private Pattern counterPattern(String counterName) {
Pattern result = counterPatterns.get(counterName);
if (result == null) {
String namePatternRegex =
"\\[\\(" + counterName + "\\)\\([^)]+\\)\\(([0-9]+)\\)\\]";
result = Pattern.compile(namePatternRegex);
counterPatterns.put(counterName, result);
}
return result;
}
private String parseCounter(String counterString, String counterName) {
if (counterString == null) {
return null;
}
Matcher mat = counterPattern(counterName).matcher(counterString);
if (mat.find()) {
return mat.group(1);
}
return null;
}
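  /*
   * Illustrative sketch of the counter-string fragments the pattern above
   * matches (the display text and value here are made up, not taken from a
   * real log):
   *
   *   [(HDFS_BYTES_READ)(HDFS bytes read)(12345)]
   *
   * parseCounter(counterString, "HDFS_BYTES_READ") would then return "12345".
   */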
abstract class SetField {
LoggedTaskAttempt attempt;
SetField(LoggedTaskAttempt attempt) {
this.attempt = attempt;
}
abstract void set(long value);
}
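  // incorporateCounter parses the named counter out of the counter string
  // and, when present, hands the numeric value to the supplied SetField
  // thunk; incorporateCounters below creates one anonymous SetField per
  // counter of interest.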
private void incorporateCounter(SetField thunk, String counterString,
String counterName) {
String valueString = parseCounter(counterString, counterName);
if (valueString != null) {
thunk.set(Long.parseLong(valueString));
}
}
private void incorporateCounters(LoggedTaskAttempt attempt2,
String counterString) {
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.hdfsBytesRead = val;
}
}, counterString, "HDFS_BYTES_READ");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.hdfsBytesWritten = val;
}
}, counterString, "HDFS_BYTES_WRITTEN");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.fileBytesRead = val;
}
}, counterString, "FILE_BYTES_READ");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.fileBytesWritten = val;
}
}, counterString, "FILE_BYTES_WRITTEN");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.mapInputBytes = val;
}
}, counterString, "MAP_INPUT_BYTES");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.mapInputRecords = val;
}
}, counterString, "MAP_INPUT_RECORDS");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.mapOutputBytes = val;
}
}, counterString, "MAP_OUTPUT_BYTES");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.mapOutputRecords = val;
}
}, counterString, "MAP_OUTPUT_RECORDS");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.combineInputRecords = val;
}
}, counterString, "COMBINE_INPUT_RECORDS");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.reduceInputGroups = val;
}
}, counterString, "REDUCE_INPUT_GROUPS");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.reduceInputRecords = val;
}
}, counterString, "REDUCE_INPUT_RECORDS");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.reduceShuffleBytes = val;
}
}, counterString, "REDUCE_SHUFFLE_BYTES");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.reduceOutputRecords = val;
}
}, counterString, "REDUCE_OUTPUT_RECORDS");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.spilledRecords = val;
}
}, counterString, "SPILLED_RECORDS");
// incorporate CPU usage
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.getResourceUsageMetrics().setCumulativeCpuUsage(val);
}
}, counterString, "CPU_MILLISECONDS");
// incorporate virtual memory usage
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.getResourceUsageMetrics().setVirtualMemoryUsage(val);
}
}, counterString, "VIRTUAL_MEMORY_BYTES");
// incorporate physical memory usage
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.getResourceUsageMetrics().setPhysicalMemoryUsage(val);
}
}, counterString, "PHYSICAL_MEMORY_BYTES");
// incorporate heap usage
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.getResourceUsageMetrics().setHeapUsage(val);
}
}, counterString, "COMMITTED_HEAP_BYTES");
}
private ParsedHost getAndRecordParsedHost(String hostName) {
ParsedHost result = ParsedHost.parse(hostName);
if (result != null && !allHosts.contains(result)) {
allHosts.add(result);
}
return result;
}
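  /**
   * Handle a "MapAttempt" history line: record the attempt in the current
   * trace job (creating task and attempt records as needed), fold in its
   * counters, measure data-locality distance against the task's preferred
   * locations, and update the start/finish-time tables used for the per-job
   * statistics.
   */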
private void processMapAttemptLine(ParsedLine line) {
String attemptID = line.get("TASK_ATTEMPT_ID");
String taskID = line.get("TASKID");
String status = line.get("TASK_STATUS");
String attemptStartTime = line.get("START_TIME");
String attemptFinishTime = line.get("FINISH_TIME");
String hostName = line.get("HOSTNAME");
String counters = line.get("COUNTERS");
if (jobBeingTraced != null && taskID != null) {
LoggedTask task = tasksInCurrentJob.get(taskID);
if (task == null) {
task = new LoggedTask();
task.setTaskID(taskID);
jobBeingTraced.getMapTasks().add(task);
tasksInCurrentJob.put(taskID, task);
}
task.setTaskID(taskID);
LoggedTaskAttempt attempt = attemptsInCurrentJob.get(attemptID);
boolean attemptAlreadyExists = attempt != null;
if (attempt == null) {
attempt = new LoggedTaskAttempt();
attempt.setAttemptID(attemptID);
}
if (!attemptAlreadyExists) {
attemptsInCurrentJob.put(attemptID, attempt);
task.getAttempts().add(attempt);
}
Pre21JobHistoryConstants.Values stat = null;
try {
stat =
status == null ? null : Pre21JobHistoryConstants.Values
.valueOf(status);
} catch (IllegalArgumentException e) {
LOG.error("A map attempt status you don't know about is \"" + status
+ "\".", e);
stat = null;
}
incorporateCounters(attempt, counters);
attempt.setResult(stat);
if (attemptStartTime != null) {
attempt.setStartTime(Long.parseLong(attemptStartTime));
}
if (attemptFinishTime != null) {
attempt.setFinishTime(Long.parseLong(attemptFinishTime));
}
int distance = Integer.MAX_VALUE;
if (hostName != null) {
ParsedHost host = getAndRecordParsedHost(hostName);
if (host != null) {
attempt.setHostName(host.getNodeName(), host.getRackName());
attempt.setLocation(host.makeLoggedLocation());
} else {
attempt.setHostName(hostName, null);
}
List<LoggedLocation> locs = task.getPreferredLocations();
if (host != null && locs != null) {
for (LoggedLocation loc : locs) {
          ParsedHost preferredLoc = new ParsedHost(loc);
          distance = Math.min(distance, preferredLoc.distance(host));
}
}
mapperLocality.enter(distance);
}
distance = Math.min(distance, successfulMapAttemptTimes.length - 1);
if (attempt.getStartTime() > 0 && attempt.getFinishTime() > 0) {
long runtime = attempt.getFinishTime() - attempt.getStartTime();
if (stat == Pre21JobHistoryConstants.Values.SUCCESS) {
successfulMapAttemptTimes[distance].enter(runtime);
}
if (stat == Pre21JobHistoryConstants.Values.FAILED) {
failedMapAttemptTimes[distance].enter(runtime);
}
}
if (attemptID != null) {
Matcher matcher = taskAttemptIDPattern.matcher(attemptID);
if (matcher.matches()) {
String attemptNumberString = matcher.group(1);
if (attemptNumberString != null) {
int attemptNumber = Integer.parseInt(attemptNumberString);
successfulNthMapperAttempts.enter(attemptNumber);
}
}
}
}
try {
if (attemptStartTime != null) {
long startTimeValue = Long.parseLong(attemptStartTime);
if (startTimeValue != 0
&& startTimeValue + MAXIMUM_CLOCK_SKEW >= launchTimeCurrentJob) {
taskAttemptStartTimes.put(attemptID, startTimeValue);
} else {
taskAttemptStartTimes.remove(attemptID);
}
} else if (status != null && attemptFinishTime != null) {
long finishTime = Long.parseLong(attemptFinishTime);
if (status.equals("SUCCESS")) {
taskMapAttemptFinishTimes.put(attemptID, finishTime);
}
}
} catch (NumberFormatException e) {
LOG.warn(
"HadoopLogsAnalyzer.processMapAttemptLine: bad numerical format, at line"
+ lineNumber + ".", e);
}
}
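  /**
   * Handle a "ReduceAttempt" history line: like processMapAttemptLine, but it
   * additionally records the shuffle and sort end times so the shuffle, sort
   * and reduce phases can be timed separately.
   */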
private void processReduceAttemptLine(ParsedLine line) {
String attemptID = line.get("TASK_ATTEMPT_ID");
String taskID = line.get("TASKID");
String status = line.get("TASK_STATUS");
String attemptStartTime = line.get("START_TIME");
String attemptFinishTime = line.get("FINISH_TIME");
String attemptShuffleFinished = line.get("SHUFFLE_FINISHED");
String attemptSortFinished = line.get("SORT_FINISHED");
String counters = line.get("COUNTERS");
String hostName = line.get("HOSTNAME");
if (hostName != null && !hostNames.contains(hostName)) {
hostNames.add(hostName);
}
if (jobBeingTraced != null && taskID != null) {
LoggedTask task = tasksInCurrentJob.get(taskID);
if (task == null) {
task = new LoggedTask();
task.setTaskID(taskID);
jobBeingTraced.getReduceTasks().add(task);
tasksInCurrentJob.put(taskID, task);
}
task.setTaskID(taskID);
LoggedTaskAttempt attempt = attemptsInCurrentJob.get(attemptID);
boolean attemptAlreadyExists = attempt != null;
if (attempt == null) {
attempt = new LoggedTaskAttempt();
attempt.setAttemptID(attemptID);
}
if (!attemptAlreadyExists) {
attemptsInCurrentJob.put(attemptID, attempt);
task.getAttempts().add(attempt);
}
Pre21JobHistoryConstants.Values stat = null;
try {
stat =
status == null ? null : Pre21JobHistoryConstants.Values
.valueOf(status);
} catch (IllegalArgumentException e) {
LOG.warn("A map attempt status you don't know about is \"" + status
+ "\".", e);
stat = null;
}
incorporateCounters(attempt, counters);
attempt.setResult(stat);
if (attemptStartTime != null) {
attempt.setStartTime(Long.parseLong(attemptStartTime));
}
if (attemptFinishTime != null) {
attempt.setFinishTime(Long.parseLong(attemptFinishTime));
}
if (attemptShuffleFinished != null) {
attempt.setShuffleFinished(Long.parseLong(attemptShuffleFinished));
}
if (attemptSortFinished != null) {
attempt.setSortFinished(Long.parseLong(attemptSortFinished));
}
if (attempt.getStartTime() > 0 && attempt.getFinishTime() > 0) {
long runtime = attempt.getFinishTime() - attempt.getStartTime();
if (stat == Pre21JobHistoryConstants.Values.SUCCESS) {
successfulReduceAttemptTimes.enter(runtime);
}
if (stat == Pre21JobHistoryConstants.Values.FAILED) {
failedReduceAttemptTimes.enter(runtime);
}
}
if (hostName != null) {
ParsedHost host = getAndRecordParsedHost(hostName);
if (host != null) {
attempt.setHostName(host.getNodeName(), host.getRackName());
} else {
attempt.setHostName(hostName, null);
}
}
if (attemptID != null) {
Matcher matcher = taskAttemptIDPattern.matcher(attemptID);
if (matcher.matches()) {
String attemptNumberString = matcher.group(1);
if (attemptNumberString != null) {
int attemptNumber = Integer.parseInt(attemptNumberString);
successfulNthReducerAttempts.enter(attemptNumber);
}
}
}
}
try {
if (attemptStartTime != null) {
long startTimeValue = Long.parseLong(attemptStartTime);
if (startTimeValue != 0
&& startTimeValue + MAXIMUM_CLOCK_SKEW >= launchTimeCurrentJob) {
taskAttemptStartTimes.put(attemptID, startTimeValue);
}
} else if (status != null && status.equals("SUCCESS")
&& attemptFinishTime != null) {
long finishTime = Long.parseLong(attemptFinishTime);
taskReduceAttemptFinishTimes.put(attemptID, finishTime);
if (attemptShuffleFinished != null) {
taskReduceAttemptShuffleEndTimes.put(attemptID, Long
.parseLong(attemptShuffleFinished));
}
if (attemptSortFinished != null) {
taskReduceAttemptSortEndTimes.put(attemptID, Long
.parseLong(attemptSortFinished));
}
}
} catch (NumberFormatException e) {
LOG.error(
"HadoopLogsAnalyzer.processReduceAttemptLine: bad numerical format, at line"
+ lineNumber + ".", e);
}
}
private void processParsedLine(ParsedLine line)
throws JsonProcessingException, IOException {
if (!collecting) {
// "Job", "MapAttempt", "ReduceAttempt", "Task"
LogRecordType myType = line.getType();
if (myType == canonicalJob) {
processJobLine(line);
} else if (myType == canonicalTask) {
processTaskLine(line);
} else if (myType == canonicalMapAttempt) {
processMapAttemptLine(line);
} else if (myType == canonicalReduceAttempt) {
processReduceAttemptLine(line);
      } else {
        // ignore other record types
      }
}
}
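  // Print one CDF per (job outcome, job type) cell of a distribution block:
  // the minimum, the value at each percentile from 1% to numberBuckets%, and
  // the maximum, as produced by Histogram.getCDF.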
private void printDistributionSet(String title, Histogram[][] distSet) {
statisticalOutput.print(title + "\n\n");
// print out buckets
for (int i = 0; i < JobOutcome.values().length; ++i) {
for (int j = 0; j < LoggedJob.JobType.values().length; ++j) {
JobOutcome thisOutcome = JobOutcome.values()[i];
LoggedJob.JobType thisType = LoggedJob.JobType.values()[j];
statisticalOutput.print("outcome = ");
statisticalOutput.print(thisOutcome.toString());
statisticalOutput.print(", and type = ");
statisticalOutput.print(thisType.toString());
statisticalOutput.print(".\n\n");
Histogram dist = distSet[i][j];
printSingleDistributionData(dist);
}
}
}
private void printSingleDistributionData(Histogram dist) {
int[] percentiles = new int[numberBuckets];
for (int k = 0; k < numberBuckets; ++k) {
percentiles[k] = k + 1;
}
long[] cdf = dist.getCDF(numberBuckets + 1, percentiles);
if (cdf == null) {
statisticalOutput.print("(No data)\n");
} else {
statisticalOutput.print("min: ");
statisticalOutput.print(cdf[0]);
statisticalOutput.print("\n");
for (int k = 0; k < numberBuckets; ++k) {
statisticalOutput.print(percentiles[k]);
statisticalOutput.print("% ");
statisticalOutput.print(cdf[k + 1]);
statisticalOutput.print("\n");
}
statisticalOutput.print("max: ");
statisticalOutput.print(cdf[numberBuckets + 1]);
statisticalOutput.print("\n");
}
}
private void maybeMateJobAndConf() throws IOException {
if (jobBeingTraced != null && jobconf != null
&& jobBeingTraced.getJobID().toString().equals(jobconf.jobID)) {
jobBeingTraced.setHeapMegabytes(jobconf.heapMegabytes);
jobBeingTraced.setQueue(jobconf.queue);
jobBeingTraced.setJobName(jobconf.jobName);
jobBeingTraced.setClusterMapMB(jobconf.clusterMapMB);
jobBeingTraced.setClusterReduceMB(jobconf.clusterReduceMB);
jobBeingTraced.setJobMapMB(jobconf.jobMapMB);
jobBeingTraced.setJobReduceMB(jobconf.jobReduceMB);
jobBeingTraced.setJobProperties(jobconf.properties);
jobconf = null;
finalizeJob();
}
}
private ArrayList<LoggedDiscreteCDF> mapCDFArrayList(Histogram[] data) {
ArrayList<LoggedDiscreteCDF> result = new ArrayList<LoggedDiscreteCDF>();
for (Histogram hist : data) {
LoggedDiscreteCDF discCDF = new LoggedDiscreteCDF();
discCDF.setCDF(hist, attemptTimesPercentiles, 100);
result.add(discCDF);
}
return result;
}
private void finalizeJob() throws IOException {
if (jobBeingTraced != null) {
if (omitTaskDetails) {
jobBeingTraced.setMapTasks(null);
jobBeingTraced.setReduceTasks(null);
jobBeingTraced.setOtherTasks(null);
}
// add digest info to the job
jobBeingTraced
.setSuccessfulMapAttemptCDFs(mapCDFArrayList(successfulMapAttemptTimes));
jobBeingTraced
.setFailedMapAttemptCDFs(mapCDFArrayList(failedMapAttemptTimes));
LoggedDiscreteCDF discCDF = new LoggedDiscreteCDF();
discCDF
.setCDF(successfulReduceAttemptTimes, attemptTimesPercentiles, 100);
jobBeingTraced.setSuccessfulReduceAttemptCDF(discCDF);
discCDF = new LoggedDiscreteCDF();
discCDF.setCDF(failedReduceAttemptTimes, attemptTimesPercentiles, 100);
jobBeingTraced.setFailedReduceAttemptCDF(discCDF);
long totalSuccessfulAttempts = 0L;
long maxTriesToSucceed = 0L;
for (Map.Entry<Long, Long> ent : successfulNthMapperAttempts) {
totalSuccessfulAttempts += ent.getValue();
maxTriesToSucceed = Math.max(maxTriesToSucceed, ent.getKey());
}
if (totalSuccessfulAttempts > 0L) {
double[] successAfterI = new double[(int) maxTriesToSucceed + 1];
for (int i = 0; i < successAfterI.length; ++i) {
successAfterI[i] = 0.0D;
}
for (Map.Entry<Long, Long> ent : successfulNthMapperAttempts) {
successAfterI[ent.getKey().intValue()] =
((double) ent.getValue()) / totalSuccessfulAttempts;
}
jobBeingTraced.setMapperTriesToSucceed(successAfterI);
} else {
jobBeingTraced.setMapperTriesToSucceed(null);
}
jobTraceGen.output(jobBeingTraced);
jobBeingTraced = null;
}
}
public int run(String[] args) throws IOException {
int result = initializeHadoopLogsAnalyzer(args);
if (result != 0) {
return result;
}
return run();
}
int run() throws IOException {
Pair<String, String> line = readBalancedLine();
while (line != null) {
if (debug
&& (lineNumber < 1000000L && lineNumber % 1000L == 0 || lineNumber % 1000000L == 0)) {
LOG.debug("" + lineNumber + " " + line.second());
}
if (line.first() == null) {
try {
// HACK ALERT!! It's possible for a Job end line to end a
// job for which we have a config file
// image [ a ParsedConfigFile ] in jobconf.
//
// processParsedLine handles this.
processParsedLine(new ParsedLine(line.second(), version));
} catch (StringIndexOutOfBoundsException e) {
LOG.warn("anomalous line #" + lineNumber + ":" + line, e);
}
} else {
jobconf = new ParsedConfigFile(line.first(), line.second());
        if (!jobconf.valid) {
jobconf = null;
}
maybeMateJobAndConf();
}
line = readBalancedLine();
}
finalizeJob();
if (collecting) {
String[] typeNames = LogRecordType.lineTypes();
for (int i = 0; i < typeNames.length; ++i) {
statisticalOutput.print(typeNames[i]);
statisticalOutput.print('\n');
}
} else {
if (delays) {
printDistributionSet("Job start delay spectrum:", delayTimeDists);
}
if (runtimes) {
printDistributionSet("Job run time spectrum:", runTimeDists);
}
if (spreading) {
String ratioDescription =
"(" + spreadMax + "/1000 %ile) to (" + spreadMin
+ "/1000 %ile) scaled by 1000000";
printDistributionSet(
"Map task success times " + ratioDescription + ":",
mapTimeSpreadDists);
printDistributionSet("Shuffle success times " + ratioDescription + ":",
shuffleTimeSpreadDists);
printDistributionSet("Sort success times " + ratioDescription + ":",
sortTimeSpreadDists);
printDistributionSet("Reduce success times " + ratioDescription + ":",
reduceTimeSpreadDists);
}
if (collectTaskTimes) {
printDistributionSet("Global map task success times:", mapTimeDists);
printDistributionSet("Global shuffle task success times:",
shuffleTimeDists);
printDistributionSet("Global sort task success times:", sortTimeDists);
printDistributionSet("Global reduce task success times:",
reduceTimeDists);
}
}
if (topologyGen != null) {
LoggedNetworkTopology topo =
new LoggedNetworkTopology(allHosts, "<root>", 0);
topologyGen.output(topo);
topologyGen.close();
}
if (jobTraceGen != null) {
jobTraceGen.close();
}
if (input != null) {
input.close();
input = null;
}
if (inputCodec != null) {
CodecPool.returnDecompressor(inputDecompressor);
inputDecompressor = null;
inputCodec = null;
}
return 0;
}
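  /*
   * Hedged usage sketch. The input path and the -spreads endpoints below are
   * illustrative only, and the exact invocation depends on how the tool is
   * deployed (it is normally launched through ToolRunner; see main below).
   * Only switches visible in this class are shown:
   *
   *   HadoopLogsAnalyzer -v1 -delays -runtimes -tasktimes \
   *       -spreads 100 900 /path/to/job-history-logs
   *
   * The last argument is the log file or directory; -d or -debug enables
   * debug logging.
   */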
  /**
   * @param args
   * 
   *          Last arg is the input file. That file can be a directory, in
   *          which case you get all the files in sorted order. We will
   *          decompress files whose names end in .gz .
   * 
   *          switches: -c collect line types: collects all the line types and
   *          prints the first example of each one
   * 
   *          -d debug mode
   * 
   *          -delays print out the delays [interval between job submit time
   *          and launch time]
   * 
   *          -runtimes print out the job runtimes
   * 
   *          -spreads print out the ratio of 10%ile and 90%ile, of both the
   *          successful map task attempt run times and the successful reduce
   *          task attempt run times
   * 
   *          -tasktimes prints out individual task time distributions
   * 
   *          -v1 parse the input as version 1 job history logs
   */
public static void main(String[] args) {
try {
HadoopLogsAnalyzer analyzer = new HadoopLogsAnalyzer();
int result = ToolRunner.run(analyzer, args);
if (result == 0) {
return;
}
System.exit(result);
} catch (FileNotFoundException e) {
LOG.error("", e);
e.printStackTrace(staticDebugOutput);
System.exit(1);
} catch (IOException e) {
LOG.error("", e);
e.printStackTrace(staticDebugOutput);
System.exit(2);
} catch (Exception e) {
LOG.error("", e);
e.printStackTrace(staticDebugOutput);
System.exit(3);
}
}
}
| 57,672 | 30.140929 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/MachineNode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
/**
* {@link MachineNode} represents the configuration of a cluster node.
* {@link MachineNode} should be constructed by {@link MachineNode.Builder}.
*/
public final class MachineNode extends Node {
long memory = -1; // in KB
int mapSlots = 1;
int reduceSlots = 1;
long memoryPerMapSlot = -1; // in KB
long memoryPerReduceSlot = -1; // in KB
int numCores = 1;
MachineNode(String name, int level) {
super(name, level);
}
@Override
public boolean equals(Object obj) {
// name/level sufficient
return super.equals(obj);
}
@Override
public int hashCode() {
// match equals
return super.hashCode();
}
/**
* Get the available physical RAM of the node.
* @return The available physical RAM of the node, in KB.
*/
public long getMemory() {
return memory;
}
/**
* Get the number of map slots of the node.
* @return The number of map slots of the node.
*/
public int getMapSlots() {
return mapSlots;
}
/**
* Get the number of reduce slots of the node.
   * @return The number of reduce slots of the node.
*/
public int getReduceSlots() {
return reduceSlots;
}
/**
* Get the amount of RAM reserved for each map slot.
* @return the amount of RAM reserved for each map slot, in KB.
*/
public long getMemoryPerMapSlot() {
return memoryPerMapSlot;
}
/**
* Get the amount of RAM reserved for each reduce slot.
* @return the amount of RAM reserved for each reduce slot, in KB.
*/
public long getMemoryPerReduceSlot() {
return memoryPerReduceSlot;
}
/**
* Get the number of cores of the node.
* @return the number of cores of the node.
*/
public int getNumCores() {
return numCores;
}
/**
* Get the rack node that the machine belongs to.
*
* @return The rack node that the machine belongs to. Returns null if the
* machine does not belong to any rack.
*/
public RackNode getRackNode() {
return (RackNode)getParent();
}
@Override
public synchronized boolean addChild(Node child) {
throw new IllegalStateException("Cannot add child to MachineNode");
}
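  /*
   * Hedged usage sketch for the Builder below (the host name, level and sizes
   * are illustrative only):
   *
   *   MachineNode node = new MachineNode.Builder("node-1.example.com", 2)
   *       .setMemory(8L * 1024 * 1024)   // 8 GB, expressed in KB
   *       .setMapSlots(4)
   *       .setReduceSlots(2)
   *       .setNumCores(8)
   *       .build();
   */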
/**
   * Builder for a {@link MachineNode} object.
*/
public static final class Builder {
private MachineNode node;
/**
     * Start building a new MachineNode object.
* @param name
* Unique name of the node. Typically the fully qualified domain
     *          name.
     * @param level
     *          Level of the node in the cluster topology tree.
     */
public Builder(String name, int level) {
node = new MachineNode(name, level);
}
/**
* Set the physical memory of the node.
* @param memory Available RAM in KB.
*/
public Builder setMemory(long memory) {
node.memory = memory;
return this;
}
/**
     * Set the number of map slots for the node.
* @param mapSlots The number of map slots for the node.
*/
public Builder setMapSlots(int mapSlots) {
node.mapSlots = mapSlots;
return this;
}
/**
     * Set the number of reduce slots for the node.
* @param reduceSlots The number of reduce slots for the node.
*/
public Builder setReduceSlots(int reduceSlots) {
node.reduceSlots = reduceSlots;
return this;
}
/**
* Set the amount of RAM reserved for each map slot.
* @param memoryPerMapSlot The amount of RAM reserved for each map slot, in KB.
*/
public Builder setMemoryPerMapSlot(long memoryPerMapSlot) {
node.memoryPerMapSlot = memoryPerMapSlot;
return this;
}
/**
* Set the amount of RAM reserved for each reduce slot.
* @param memoryPerReduceSlot The amount of RAM reserved for each reduce slot, in KB.
*/
public Builder setMemoryPerReduceSlot(long memoryPerReduceSlot) {
node.memoryPerReduceSlot = memoryPerReduceSlot;
return this;
}
/**
* Set the number of cores for the node.
* @param numCores Number of cores for the node.
*/
public Builder setNumCores(int numCores) {
node.numCores = numCores;
return this;
}
/**
* Clone the settings from a reference {@link MachineNode} object.
* @param ref The reference {@link MachineNode} object.
*/
public Builder cloneFrom(MachineNode ref) {
node.memory = ref.memory;
node.mapSlots = ref.mapSlots;
node.reduceSlots = ref.reduceSlots;
node.memoryPerMapSlot = ref.memoryPerMapSlot;
node.memoryPerReduceSlot = ref.memoryPerReduceSlot;
node.numCores = ref.numCores;
return this;
}
/**
* Build the {@link MachineNode} object.
* @return The {@link MachineNode} object being built.
*/
public MachineNode build() {
MachineNode retVal = node;
node = null;
return retVal;
}
}
}
| 5,663 | 26.495146 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/AbstractClusterStory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.ArrayDeque;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
/**
* {@link AbstractClusterStory} provides a partial implementation of
* {@link ClusterStory} by parsing the topology tree.
*/
public abstract class AbstractClusterStory implements ClusterStory {
protected Set<MachineNode> machineNodes;
protected Set<RackNode> rackNodes;
protected MachineNode[] mNodesFlattened;
protected Map<String, MachineNode> mNodeMap;
protected Map<String, RackNode> rNodeMap;
protected int maximumDistance = 0;
@Override
public Set<MachineNode> getMachines() {
parseTopologyTree();
return machineNodes;
}
@Override
public synchronized Set<RackNode> getRacks() {
parseTopologyTree();
return rackNodes;
}
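  // getRandomMachines picks 'expected' distinct machines (or all of them if
  // fewer exist) by running a partial Fisher-Yates shuffle over a lazily
  // flattened copy of the machine set; the flattened array is reused across
  // calls.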
@Override
public synchronized MachineNode[] getRandomMachines(int expected,
Random random) {
if (expected == 0) {
return new MachineNode[0];
}
parseTopologyTree();
int total = machineNodes.size();
int select = Math.min(expected, total);
if (mNodesFlattened == null) {
mNodesFlattened = machineNodes.toArray(new MachineNode[total]);
}
MachineNode[] retval = new MachineNode[select];
int i = 0;
while ((i != select) && (total != i + select)) {
int index = random.nextInt(total - i);
MachineNode tmp = mNodesFlattened[index];
mNodesFlattened[index] = mNodesFlattened[total - i - 1];
mNodesFlattened[total - i - 1] = tmp;
++i;
}
if (i == select) {
System.arraycopy(mNodesFlattened, total - i, retval, 0, select);
} else {
System.arraycopy(mNodesFlattened, 0, retval, 0, select);
}
return retval;
}
protected synchronized void buildMachineNodeMap() {
if (mNodeMap == null) {
mNodeMap = new HashMap<String, MachineNode>(machineNodes.size());
for (MachineNode mn : machineNodes) {
mNodeMap.put(mn.getName(), mn);
}
}
}
@Override
public MachineNode getMachineByName(String name) {
buildMachineNodeMap();
return mNodeMap.get(name);
}
@Override
public int distance(Node a, Node b) {
int lvl_a = a.getLevel();
int lvl_b = b.getLevel();
int retval = 0;
if (lvl_a > lvl_b) {
retval = lvl_a-lvl_b;
for (int i=0; i<retval; ++i) {
a = a.getParent();
}
} else if (lvl_a < lvl_b) {
retval = lvl_b-lvl_a;
for (int i=0; i<retval; ++i) {
b = b.getParent();
}
}
while (a != b) {
a = a.getParent();
b = b.getParent();
++retval;
}
return retval;
}
protected synchronized void buildRackNodeMap() {
if (rNodeMap == null) {
rNodeMap = new HashMap<String, RackNode>(rackNodes.size());
for (RackNode rn : rackNodes) {
rNodeMap.put(rn.getName(), rn);
}
}
}
@Override
public RackNode getRackByName(String name) {
buildRackNodeMap();
return rNodeMap.get(name);
}
@Override
public int getMaximumDistance() {
parseTopologyTree();
return maximumDistance;
}
protected synchronized void parseTopologyTree() {
if (machineNodes == null) {
Node root = getClusterTopology();
SortedSet<MachineNode> mNodes = new TreeSet<MachineNode>();
SortedSet<RackNode> rNodes = new TreeSet<RackNode>();
// dfs search of the tree.
Deque<Node> unvisited = new ArrayDeque<Node>();
Deque<Integer> distUnvisited = new ArrayDeque<Integer>();
unvisited.add(root);
distUnvisited.add(0);
for (Node n = unvisited.poll(); n != null; n = unvisited.poll()) {
int distance = distUnvisited.poll();
if (n instanceof RackNode) {
rNodes.add((RackNode) n);
mNodes.addAll(((RackNode) n).getMachinesInRack());
if (distance + 1 > maximumDistance) {
maximumDistance = distance + 1;
}
} else if (n instanceof MachineNode) {
mNodes.add((MachineNode) n);
if (distance > maximumDistance) {
maximumDistance = distance;
}
} else {
for (Node child : n.getChildren()) {
unvisited.addFirst(child);
distUnvisited.addFirst(distance+1);
}
}
}
machineNodes = Collections.unmodifiableSortedSet(mNodes);
rackNodes = Collections.unmodifiableSortedSet(rNodes);
}
}
}
| 5,445 | 28.437838 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ZombieJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.HashMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TaskStatus.State;
import org.apache.hadoop.mapreduce.ID;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.tools.rumen.datatypes.*;
import org.apache.hadoop.tools.rumen.Pre21JobHistoryConstants.Values;
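/*
 * Hedged usage sketch (the variable names and the seed are illustrative; the
 * LoggedJob and optional ClusterStory would normally come from a parsed
 * trace):
 *
 *   JobStory story = new ZombieJob(loggedJob, clusterStory, 42L);
 *   JobConf conf = story.getJobConf();
 *   InputSplit[] splits = story.getInputSplits();
 */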
/**
* {@link ZombieJob} is a layer above {@link LoggedJob} raw JSON objects.
*
* Each {@link ZombieJob} object represents a job in job history. For everything
* that exists in job history, contents are returned unchanged faithfully. To
 * get input splits of a nonexistent task, a nonexistent task attempt, or an
* ill-formed task attempt, proper objects are made up from statistical
* sketches.
*/
@SuppressWarnings("deprecation")
public class ZombieJob implements JobStory {
static final Log LOG = LogFactory.getLog(ZombieJob.class);
private final LoggedJob job;
private Map<TaskID, LoggedTask> loggedTaskMap;
private Map<TaskAttemptID, LoggedTaskAttempt> loggedTaskAttemptMap;
private final Random random;
private InputSplit[] splits;
private final ClusterStory cluster;
private JobConf jobConf;
private long seed;
private long numRandomSeeds = 0;
private boolean hasRandomSeed = false;
private Map<LoggedDiscreteCDF, CDFRandomGenerator> interpolatorMap =
new HashMap<LoggedDiscreteCDF, CDFRandomGenerator>();
// TODO: Fix ZombieJob to initialize this correctly from observed data
double rackLocalOverNodeLocal = 1.5;
double rackRemoteOverNodeLocal = 3.0;
/**
* This constructor creates a {@link ZombieJob} with the same semantics as the
   * {@link LoggedJob} passed in as a parameter.
*
* @param job
* The dead job this ZombieJob instance is based on.
* @param cluster
* The cluster topology where the dead job ran on. This argument can
* be null if we do not have knowledge of the cluster topology.
* @param seed
* Seed for the random number generator for filling in information
* not available from the ZombieJob.
*/
public ZombieJob(LoggedJob job, ClusterStory cluster, long seed) {
if (job == null) {
throw new IllegalArgumentException("job is null");
}
this.job = job;
this.cluster = cluster;
random = new Random(seed);
this.seed = seed;
hasRandomSeed = true;
}
/**
* This constructor creates a {@link ZombieJob} with the same semantics as the
   * {@link LoggedJob} passed in as a parameter.
*
* @param job
* The dead job this ZombieJob instance is based on.
* @param cluster
* The cluster topology where the dead job ran on. This argument can
* be null if we do not have knowledge of the cluster topology.
*/
public ZombieJob(LoggedJob job, ClusterStory cluster) {
this(job, cluster, System.nanoTime());
}
private static State convertState(Values status) {
if (status == Values.SUCCESS) {
return State.SUCCEEDED;
} else if (status == Values.FAILED) {
return State.FAILED;
} else if (status == Values.KILLED) {
return State.KILLED;
} else {
throw new IllegalArgumentException("unknown status " + status);
}
}
@Override
public synchronized JobConf getJobConf() {
if (jobConf == null) {
jobConf = new JobConf();
// Add parameters from the configuration in the job trace
//
      // The job configuration parameters, as seen in the jobconf file, are
      // added first so that the specialized values obtained from Rumen can
      // override the job conf values.
//
for (Map.Entry<Object, Object> entry : job.getJobProperties().getValue().entrySet()) {
jobConf.set(entry.getKey().toString(), entry.getValue().toString());
}
//TODO Eliminate parameters that are already copied from the job's
// configuration file.
jobConf.setJobName(getName());
jobConf.setUser(getUser());
jobConf.setNumMapTasks(getNumberMaps());
jobConf.setNumReduceTasks(getNumberReduces());
jobConf.setQueueName(getQueueName());
}
return jobConf;
}
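  // getInputSplits synthesizes one FileSplit per logged map task, using the
  // logged preferred locations as split hosts and the logged input bytes as
  // the split length (against a dummy path). If the trace describes fewer map
  // tasks than totalMaps, the remainder are made up with either no hosts or a
  // random sample of cluster machines.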
@Override
public InputSplit[] getInputSplits() {
if (splits == null) {
List<InputSplit> splitsList = new ArrayList<InputSplit>();
Path emptyPath = new Path("/");
int totalHosts = 0; // use to determine avg # of hosts per split.
for (LoggedTask mapTask : job.getMapTasks()) {
Pre21JobHistoryConstants.Values taskType = mapTask.getTaskType();
if (taskType != Pre21JobHistoryConstants.Values.MAP) {
LOG.warn("TaskType for a MapTask is not Map. task="
+ mapTask.getTaskID() + " type="
+ ((taskType == null) ? "null" : taskType.toString()));
continue;
}
List<LoggedLocation> locations = mapTask.getPreferredLocations();
List<String> hostList = new ArrayList<String>();
if (locations != null) {
for (LoggedLocation location : locations) {
List<NodeName> layers = location.getLayers();
if (layers.size() == 0) {
LOG.warn("Bad location layer format for task "+mapTask.getTaskID());
continue;
}
String host = layers.get(layers.size() - 1).getValue();
if (host == null) {
LOG.warn("Bad location layer format for task "+mapTask.getTaskID() + ": " + layers);
continue;
}
hostList.add(host);
}
}
String[] hosts = hostList.toArray(new String[hostList.size()]);
totalHosts += hosts.length;
long mapInputBytes = getTaskInfo(mapTask).getInputBytes();
if (mapInputBytes < 0) {
LOG.warn("InputBytes for task "+mapTask.getTaskID()+" is not defined.");
mapInputBytes = 0;
}
splitsList.add(new FileSplit(emptyPath, 0, mapInputBytes, hosts));
}
// If not all map tasks are in job trace, should make up some splits
// for missing map tasks.
int totalMaps = job.getTotalMaps();
if (totalMaps < splitsList.size()) {
LOG.warn("TotalMaps for job " + job.getJobID()
+ " is less than the total number of map task descriptions ("
+ totalMaps + "<" + splitsList.size() + ").");
}
int avgHostPerSplit;
if (splitsList.size() == 0) {
avgHostPerSplit = 3;
} else {
avgHostPerSplit = totalHosts / splitsList.size();
if (avgHostPerSplit == 0) {
avgHostPerSplit = 3;
}
}
for (int i = splitsList.size(); i < totalMaps; i++) {
if (cluster == null) {
splitsList.add(new FileSplit(emptyPath, 0, 0, new String[0]));
} else {
MachineNode[] mNodes = cluster.getRandomMachines(avgHostPerSplit,
random);
String[] hosts = new String[mNodes.length];
for (int j = 0; j < hosts.length; ++j) {
hosts[j] = mNodes[j].getName();
}
// TODO set size of a split to 0 now.
splitsList.add(new FileSplit(emptyPath, 0, 0, hosts));
}
}
splits = splitsList.toArray(new InputSplit[splitsList.size()]);
}
return splits;
}
@Override
public String getName() {
JobName jobName = job.getJobName();
if (jobName == null || jobName.getValue() == null) {
return "(name unknown)";
} else {
return jobName.getValue();
}
}
@Override
public JobID getJobID() {
return getLoggedJob().getJobID();
}
private int sanitizeValue(int oldVal, int defaultVal, String name, JobID id) {
if (oldVal == -1) {
LOG.warn(name +" not defined for "+id);
return defaultVal;
}
return oldVal;
}
@Override
public int getNumberMaps() {
return sanitizeValue(job.getTotalMaps(), 0, "NumberMaps", job.getJobID());
}
@Override
public int getNumberReduces() {
return sanitizeValue(job.getTotalReduces(), 0, "NumberReduces", job.getJobID());
}
@Override
public Values getOutcome() {
return job.getOutcome();
}
@Override
public long getSubmissionTime() {
return job.getSubmitTime() - job.getRelativeTime();
}
@Override
public String getQueueName() {
QueueName queue = job.getQueue();
return (queue == null || queue.getValue() == null)
? JobConf.DEFAULT_QUEUE_NAME
: queue.getValue();
}
/**
   * Get the number of map tasks that are actually logged in the trace.
* @return The number of map tasks that are actually logged in the trace.
*/
public int getNumLoggedMaps() {
return job.getMapTasks().size();
}
/**
   * Get the number of reduce tasks that are actually logged in the trace.
   * @return The number of reduce tasks that are actually logged in the trace.
*/
public int getNumLoggedReduces() {
return job.getReduceTasks().size();
}
/**
* Mask the job ID part in a {@link TaskID}.
*
* @param taskId
* raw {@link TaskID} read from trace
* @return masked {@link TaskID} with empty {@link JobID}.
*/
private TaskID maskTaskID(TaskID taskId) {
JobID jobId = new JobID();
TaskType taskType = taskId.getTaskType();
return new TaskID(jobId, taskType, taskId.getId());
}
/**
* Mask the job ID part in a {@link TaskAttemptID}.
*
* @param attemptId
* raw {@link TaskAttemptID} read from trace
* @return masked {@link TaskAttemptID} with empty {@link JobID}.
*/
private TaskAttemptID maskAttemptID(TaskAttemptID attemptId) {
JobID jobId = new JobID();
TaskType taskType = attemptId.getTaskType();
TaskID taskId = attemptId.getTaskID();
return new TaskAttemptID(jobId.getJtIdentifier(), jobId.getId(), taskType,
taskId.getId(), attemptId.getId());
}
private LoggedTask sanitizeLoggedTask(LoggedTask task) {
if (task == null) {
return null;
}
if (task.getTaskType() == null) {
LOG.warn("Task " + task.getTaskID() + " has nulll TaskType");
return null;
}
if (task.getTaskStatus() == null) {
LOG.warn("Task " + task.getTaskID() + " has nulll TaskStatus");
return null;
}
return task;
}
private LoggedTaskAttempt sanitizeLoggedTaskAttempt(LoggedTaskAttempt attempt) {
if (attempt == null) {
return null;
}
if (attempt.getResult() == null) {
LOG.warn("TaskAttempt " + attempt.getResult() + " has nulll Result");
return null;
}
return attempt;
}
/**
* Build task mapping and task attempt mapping, to be later used to find
* information of a particular {@link TaskID} or {@link TaskAttemptID}.
*/
private synchronized void buildMaps() {
if (loggedTaskMap == null) {
loggedTaskMap = new HashMap<TaskID, LoggedTask>();
loggedTaskAttemptMap = new HashMap<TaskAttemptID, LoggedTaskAttempt>();
for (LoggedTask map : job.getMapTasks()) {
map = sanitizeLoggedTask(map);
if (map != null) {
loggedTaskMap.put(maskTaskID(map.taskID), map);
for (LoggedTaskAttempt mapAttempt : map.getAttempts()) {
mapAttempt = sanitizeLoggedTaskAttempt(mapAttempt);
if (mapAttempt != null) {
TaskAttemptID id = mapAttempt.getAttemptID();
loggedTaskAttemptMap.put(maskAttemptID(id), mapAttempt);
}
}
}
}
for (LoggedTask reduce : job.getReduceTasks()) {
reduce = sanitizeLoggedTask(reduce);
if (reduce != null) {
loggedTaskMap.put(maskTaskID(reduce.taskID), reduce);
for (LoggedTaskAttempt reduceAttempt : reduce.getAttempts()) {
reduceAttempt = sanitizeLoggedTaskAttempt(reduceAttempt);
if (reduceAttempt != null) {
TaskAttemptID id = reduceAttempt.getAttemptID();
loggedTaskAttemptMap.put(maskAttemptID(id), reduceAttempt);
}
}
}
}
// TODO: do not care about "other" tasks, "setup" or "clean"
}
}
@Override
public String getUser() {
UserName retval = job.getUser();
return (retval == null || retval.getValue() == null)
? "(unknown)"
: retval.getValue();
}
/**
* Get the underlining {@link LoggedJob} object read directly from the trace.
* This is mainly for debugging.
*
* @return the underlining {@link LoggedJob} object
*/
public LoggedJob getLoggedJob() {
return job;
}
/**
* Get a {@link TaskAttemptInfo} with a {@link TaskAttemptID} associated with
* taskType, taskNumber, and taskAttemptNumber. This function does not care
* about locality, and follows the following decision logic: 1. Make up a
* {@link TaskAttemptInfo} if the task attempt is missing in trace, 2. Make up
* a {@link TaskAttemptInfo} if the task attempt has a KILLED final status in
* trace, 3. Otherwise (final state is SUCCEEDED or FAILED), construct the
* {@link TaskAttemptInfo} from the trace.
*/
public TaskAttemptInfo getTaskAttemptInfo(TaskType taskType, int taskNumber,
int taskAttemptNumber) {
// does not care about locality. assume default locality is NODE_LOCAL.
// But if both task and task attempt exist in trace, use logged locality.
int locality = 0;
LoggedTask loggedTask = getLoggedTask(taskType, taskNumber);
if (loggedTask == null) {
// TODO insert parameters
TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0);
return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
taskNumber, locality);
}
LoggedTaskAttempt loggedAttempt = getLoggedTaskAttempt(taskType,
taskNumber, taskAttemptNumber);
if (loggedAttempt == null) {
// Task exists, but attempt is missing.
TaskInfo taskInfo = getTaskInfo(loggedTask);
return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
taskNumber, locality);
} else {
// TODO should we handle killed attempts later?
      if (loggedAttempt.getResult() == Values.KILLED) {
TaskInfo taskInfo = getTaskInfo(loggedTask);
return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
taskNumber, locality);
} else {
return getTaskAttemptInfo(loggedTask, loggedAttempt);
}
}
}
@Override
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
return getTaskInfo(getLoggedTask(taskType, taskNumber));
}
  /**
   * Get a {@link TaskAttemptInfo} with a {@link TaskAttemptID} associated with
   * taskType, taskNumber, and taskAttemptNumber. This function takes locality
   * into account and uses the following decision logic:
   * 1. make up a {@link TaskAttemptInfo} if the task attempt is missing from
   *    the trace;
   * 2. make up a {@link TaskAttemptInfo} if the task attempt has a KILLED
   *    final status in the trace;
   * 3. if the final state is FAILED, construct a {@link TaskAttemptInfo} from
   *    the trace without considering locality;
   * 4. if the final state is SUCCEEDED, construct a {@link TaskAttemptInfo}
   *    from the trace, with the runtime scaled according to the locality in
   *    the simulation versus the locality in the trace.
   */
@Override
public TaskAttemptInfo getMapTaskAttemptInfoAdjusted(int taskNumber,
int taskAttemptNumber, int locality) {
TaskType taskType = TaskType.MAP;
LoggedTask loggedTask = getLoggedTask(taskType, taskNumber);
if (loggedTask == null) {
// TODO insert parameters
TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0);
return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
taskNumber, locality);
}
LoggedTaskAttempt loggedAttempt = getLoggedTaskAttempt(taskType,
taskNumber, taskAttemptNumber);
if (loggedAttempt == null) {
// Task exists, but attempt is missing.
TaskInfo taskInfo = getTaskInfo(loggedTask);
return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
taskNumber, locality);
} else {
// Task and TaskAttempt both exist.
if (loggedAttempt.getResult() == Values.KILLED) {
TaskInfo taskInfo = getTaskInfo(loggedTask);
return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
taskNumber, locality);
} else if (loggedAttempt.getResult() == Values.FAILED) {
        /**
         * A FAILED attempt taken from the trace is not affected by locality.
         * Made-up FAILED attempts, however, ARE affected by locality, since
         * separate statistics are kept for attempts at each locality level.
         */
return getTaskAttemptInfo(loggedTask, loggedAttempt);
} else if (loggedAttempt.getResult() == Values.SUCCESS) {
int loggedLocality = getLocality(loggedTask, loggedAttempt);
if (locality == loggedLocality) {
return getTaskAttemptInfo(loggedTask, loggedAttempt);
} else {
// attempt succeeded in trace. It is scheduled in simulation with
// a different locality.
return scaleInfo(loggedTask, loggedAttempt, locality, loggedLocality,
rackLocalOverNodeLocal, rackRemoteOverNodeLocal);
}
} else {
throw new IllegalArgumentException(
"attempt result is not SUCCEEDED, FAILED or KILLED: "
+ loggedAttempt.getResult());
}
}
}
private long sanitizeTaskRuntime(long time, ID id) {
if (time < 0) {
LOG.warn("Negative running time for task "+id+": "+time);
return 100L; // set default to 100ms.
}
return time;
}
@SuppressWarnings("hiding")
private TaskAttemptInfo scaleInfo(LoggedTask loggedTask,
LoggedTaskAttempt loggedAttempt, int locality, int loggedLocality,
double rackLocalOverNodeLocal, double rackRemoteOverNodeLocal) {
TaskInfo taskInfo = getTaskInfo(loggedTask);
double[] factors = new double[] { 1.0, rackLocalOverNodeLocal,
rackRemoteOverNodeLocal };
double scaleFactor = factors[locality] / factors[loggedLocality];
State state = convertState(loggedAttempt.getResult());
if (loggedTask.getTaskType() == Values.MAP) {
long taskTime = 0;
if (loggedAttempt.getStartTime() == 0) {
taskTime = makeUpMapRuntime(state, locality);
} else {
taskTime = loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
}
taskTime = sanitizeTaskRuntime(taskTime, loggedAttempt.getAttemptID());
taskTime *= scaleFactor;
return new MapTaskAttemptInfo
(state, taskInfo, taskTime, loggedAttempt.allSplitVectors());
} else {
throw new IllegalArgumentException("taskType can only be MAP: "
+ loggedTask.getTaskType());
}
}
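  // Illustrative worked example of the scaling above (the factor values are
  // hypothetical, not taken from the trace): with
  //   factors = { 1.0, rackLocalOverNodeLocal = 1.5, rackRemoteOverNodeLocal = 3.0 },
  // an attempt logged as node-local (loggedLocality = 0) but scheduled
  // rack-local in the simulation (locality = 1) gets
  //   scaleFactor = factors[1] / factors[0] = 1.5,
  // so a logged map runtime of 10,000 ms is replayed as 15,000 ms.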
private int getLocality(LoggedTask loggedTask, LoggedTaskAttempt loggedAttempt) {
int distance = cluster.getMaximumDistance();
String rackHostName = loggedAttempt.getHostName().getValue();
if (rackHostName == null) {
return distance;
}
MachineNode mn = getMachineNode(rackHostName);
if (mn == null) {
return distance;
}
List<LoggedLocation> locations = loggedTask.getPreferredLocations();
if (locations != null) {
for (LoggedLocation location : locations) {
List<NodeName> layers = location.getLayers();
if ((layers == null) || (layers.isEmpty())) {
continue;
}
String dataNodeName = layers.get(layers.size()-1).getValue();
MachineNode dataNode = cluster.getMachineByName(dataNodeName);
if (dataNode != null) {
distance = Math.min(distance, cluster.distance(mn, dataNode));
}
}
}
return distance;
}
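  // Illustrative example of the distance computation above (topology details
  // are assumptions): if the attempt's host can be resolved and one of the
  // task's preferred locations is a node on the same rack, cluster.distance()
  // typically yields 1 (rack-local), which becomes the returned locality; if
  // neither the host nor any preferred location can be resolved, the maximum
  // cluster distance (commonly 2, i.e. off-rack) is returned.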
private MachineNode getMachineNode(String rackHostName) {
ParsedHost parsedHost = ParsedHost.parse(rackHostName);
String hostName = (parsedHost == null) ? rackHostName
: parsedHost.getNodeName();
if (hostName == null) {
return null;
}
return (cluster == null) ? null : cluster.getMachineByName(hostName);
}
private TaskAttemptInfo getTaskAttemptInfo(LoggedTask loggedTask,
LoggedTaskAttempt loggedAttempt) {
TaskInfo taskInfo = getTaskInfo(loggedTask);
List<List<Integer>> allSplitVectors = loggedAttempt.allSplitVectors();
State state = convertState(loggedAttempt.getResult());
if (loggedTask.getTaskType() == Values.MAP) {
long taskTime;
if (loggedAttempt.getStartTime() == 0) {
int locality = getLocality(loggedTask, loggedAttempt);
taskTime = makeUpMapRuntime(state, locality);
} else {
taskTime = loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
}
taskTime = sanitizeTaskRuntime(taskTime, loggedAttempt.getAttemptID());
return new MapTaskAttemptInfo(state, taskInfo, taskTime, allSplitVectors);
} else if (loggedTask.getTaskType() == Values.REDUCE) {
long startTime = loggedAttempt.getStartTime();
long mergeDone = loggedAttempt.getSortFinished();
long shuffleDone = loggedAttempt.getShuffleFinished();
long finishTime = loggedAttempt.getFinishTime();
if (startTime <= 0 || startTime >= finishTime) {
        // startTime > finishTime has been observed in traces; a reduce task
        // with startTime == 0 has not. If either happens, make up a
        // reduceTime with no shuffle/merge phases.
long reduceTime = makeUpReduceRuntime(state);
return new ReduceTaskAttemptInfo
(state, taskInfo, 0, 0, reduceTime, allSplitVectors);
} else {
if (shuffleDone <= 0) {
shuffleDone = startTime;
}
if (mergeDone <= 0) {
mergeDone = finishTime;
}
long shuffleTime = shuffleDone - startTime;
long mergeTime = mergeDone - shuffleDone;
long reduceTime = finishTime - mergeDone;
reduceTime = sanitizeTaskRuntime(reduceTime, loggedAttempt.getAttemptID());
return new ReduceTaskAttemptInfo(state, taskInfo, shuffleTime,
mergeTime, reduceTime, allSplitVectors);
}
} else {
throw new IllegalArgumentException("taskType for "
+ loggedTask.getTaskID() + " is neither MAP nor REDUCE: "
+ loggedTask.getTaskType());
}
}
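  // Illustrative example of the reduce-phase decomposition above (timestamps
  // are hypothetical): for an attempt with startTime = 1000,
  // shuffleFinished = 4000, sortFinished = 5000 and finishTime = 9000,
  //   shuffleTime = 4000 - 1000 = 3000 ms,
  //   mergeTime   = 5000 - 4000 = 1000 ms,
  //   reduceTime  = 9000 - 5000 = 4000 ms.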
private TaskInfo getTaskInfo(LoggedTask loggedTask) {
if (loggedTask == null) {
return new TaskInfo(0, 0, 0, 0, 0);
}
List<LoggedTaskAttempt> attempts = loggedTask.getAttempts();
long inputBytes = -1;
long inputRecords = -1;
long outputBytes = -1;
long outputRecords = -1;
long heapMegabytes = -1;
ResourceUsageMetrics metrics = new ResourceUsageMetrics();
Values type = loggedTask.getTaskType();
if ((type != Values.MAP) && (type != Values.REDUCE)) {
throw new IllegalArgumentException(
"getTaskInfo only supports MAP or REDUCE tasks: " + type.toString()
+ " for task = " + loggedTask.getTaskID());
}
for (LoggedTaskAttempt attempt : attempts) {
attempt = sanitizeLoggedTaskAttempt(attempt);
// ignore bad attempts or unsuccessful attempts.
if ((attempt == null) || (attempt.getResult() != Values.SUCCESS)) {
continue;
}
if (type == Values.MAP) {
inputBytes = attempt.getHdfsBytesRead();
inputRecords = attempt.getMapInputRecords();
outputBytes =
(job.getTotalReduces() > 0) ? attempt.getMapOutputBytes() : attempt
.getHdfsBytesWritten();
outputRecords = attempt.getMapOutputRecords();
heapMegabytes =
(job.getJobMapMB() > 0) ? job.getJobMapMB() : job
.getHeapMegabytes();
} else {
inputBytes = attempt.getReduceShuffleBytes();
inputRecords = attempt.getReduceInputRecords();
outputBytes = attempt.getHdfsBytesWritten();
outputRecords = attempt.getReduceOutputRecords();
heapMegabytes =
(job.getJobReduceMB() > 0) ? job.getJobReduceMB() : job
.getHeapMegabytes();
}
// set the resource usage metrics
metrics = attempt.getResourceUsageMetrics();
break;
}
TaskInfo taskInfo =
new TaskInfo(inputBytes, (int) inputRecords, outputBytes,
(int) outputRecords, (int) heapMegabytes,
metrics);
return taskInfo;
}
private TaskAttemptID makeTaskAttemptID(TaskType taskType, int taskNumber,
int taskAttemptNumber) {
return new TaskAttemptID(new TaskID(job.getJobID(), taskType, taskNumber),
taskAttemptNumber);
}
private TaskAttemptInfo makeUpTaskAttemptInfo(TaskType taskType, TaskInfo taskInfo,
int taskAttemptNumber, int taskNumber, int locality) {
if (taskType == TaskType.MAP) {
State state = State.SUCCEEDED;
long runtime = 0;
// make up state
state = makeUpState(taskAttemptNumber, job.getMapperTriesToSucceed());
runtime = makeUpMapRuntime(state, locality);
runtime = sanitizeTaskRuntime(runtime, makeTaskAttemptID(taskType,
taskNumber, taskAttemptNumber));
TaskAttemptInfo tai
= new MapTaskAttemptInfo(state, taskInfo, runtime, null);
return tai;
} else if (taskType == TaskType.REDUCE) {
State state = State.SUCCEEDED;
long shuffleTime = 0;
long sortTime = 0;
long reduceTime = 0;
// TODO make up state
// state = makeUpState(taskAttemptNumber, job.getReducerTriesToSucceed());
reduceTime = makeUpReduceRuntime(state);
TaskAttemptInfo tai = new ReduceTaskAttemptInfo
(state, taskInfo, shuffleTime, sortTime, reduceTime, null);
return tai;
}
throw new IllegalArgumentException("taskType is neither MAP nor REDUCE: "
+ taskType);
}
private long makeUpReduceRuntime(State state) {
long reduceTime = 0;
for (int i = 0; i < 5; i++) {
reduceTime = doMakeUpReduceRuntime(state);
if (reduceTime >= 0) {
return reduceTime;
}
}
return 0;
}
private long doMakeUpReduceRuntime(State state) {
long reduceTime;
try {
if (state == State.SUCCEEDED) {
reduceTime = makeUpRuntime(job.getSuccessfulReduceAttemptCDF());
} else if (state == State.FAILED) {
reduceTime = makeUpRuntime(job.getFailedReduceAttemptCDF());
} else {
throw new IllegalArgumentException(
"state is neither SUCCEEDED nor FAILED: " + state);
}
return reduceTime;
} catch (NoValueToMakeUpRuntime e) {
return 0;
}
}
private long makeUpMapRuntime(State state, int locality) {
long runtime;
// make up runtime
if (state == State.SUCCEEDED || state == State.FAILED) {
List<LoggedDiscreteCDF> cdfList =
state == State.SUCCEEDED ? job.getSuccessfulMapAttemptCDFs() : job
.getFailedMapAttemptCDFs();
      // XXX MapCDFs is an ArrayList of 4 possible groups: distance=0, 1, 2,
      // and a last group meaning "distance cannot be determined". Pig jobs
      // typically have only the 4th group, and pig tasks usually have no
      // locality, so that group should count as "distance=2". However,
      // setup/cleanup tasks are also counted in the 4th group, and those
      // samples are not meaningful here.
      if (cdfList == null) {
runtime = -1;
return runtime;
}
try {
runtime = makeUpRuntime(cdfList.get(locality));
} catch (NoValueToMakeUpRuntime e) {
runtime = makeUpRuntime(cdfList);
}
} else {
throw new IllegalArgumentException(
"state is neither SUCCEEDED nor FAILED: " + state);
}
return runtime;
}
  /**
   * Perform a weighted random selection on a list of CDFs, and produce a random
   * variable using the selected CDF.
   *
   * @param mapAttemptCDFs
   *          A list of CDFs for the distribution of runtime for the 1st, 2nd,
   *          ... map attempts for the job.
   * @return a runtime drawn from the selected CDF, or -1 if the list is null
   *         or contains no recorded values.
   */
private long makeUpRuntime(List<LoggedDiscreteCDF> mapAttemptCDFs) {
int total = 0;
    if (mapAttemptCDFs == null) {
return -1;
}
for (LoggedDiscreteCDF cdf : mapAttemptCDFs) {
total += cdf.getNumberValues();
}
if (total == 0) {
return -1;
}
int index = random.nextInt(total);
for (LoggedDiscreteCDF cdf : mapAttemptCDFs) {
if (index >= cdf.getNumberValues()) {
index -= cdf.getNumberValues();
} else {
if (index < 0) {
throw new IllegalStateException("application error");
}
return makeUpRuntime(cdf);
}
}
throw new IllegalStateException("not possible to get here");
}
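  // Illustrative example of the weighted selection above (counts are
  // hypothetical): given three CDFs with 6, 3 and 1 recorded values, the
  // uniform draw over [0, 10) selects the first CDF with probability 0.6, the
  // second with 0.3 and the third with 0.1; the selected CDF is then handed to
  // makeUpRuntime(LoggedDiscreteCDF) to interpolate a concrete runtime.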
private long makeUpRuntime(LoggedDiscreteCDF loggedDiscreteCDF) {
/*
* We need this odd-looking code because if a seed exists we need to ensure
* that only one interpolator is generated per LoggedDiscreteCDF, but if no
* seed exists then the potentially lengthy process of making an
* interpolator can happen outside the lock. makeUpRuntimeCore only locks
* around the two hash map accesses.
*/
if (hasRandomSeed) {
synchronized (interpolatorMap) {
return makeUpRuntimeCore(loggedDiscreteCDF);
}
}
return makeUpRuntimeCore(loggedDiscreteCDF);
}
private synchronized long getNextRandomSeed() {
numRandomSeeds++;
return RandomSeedGenerator.getSeed("forZombieJob" + job.getJobID(),
numRandomSeeds);
}
private long makeUpRuntimeCore(LoggedDiscreteCDF loggedDiscreteCDF) {
CDFRandomGenerator interpolator;
synchronized (interpolatorMap) {
interpolator = interpolatorMap.get(loggedDiscreteCDF);
}
if (interpolator == null) {
if (loggedDiscreteCDF.getNumberValues() == 0) {
throw new NoValueToMakeUpRuntime("no value to use to make up runtime");
}
interpolator =
hasRandomSeed ? new CDFPiecewiseLinearRandomGenerator(
loggedDiscreteCDF, getNextRandomSeed())
: new CDFPiecewiseLinearRandomGenerator(loggedDiscreteCDF);
      /*
       * It doesn't matter if we compute and store an interpolator twice,
       * because the two instances are semantically identical and stateless.
       * The exception is when we're seeded: the interpolators are then
       * stateful, but in that case this code only runs while holding the
       * interpolatorMap lock (see makeUpRuntime above).
       */
synchronized (interpolatorMap) {
interpolatorMap.put(loggedDiscreteCDF, interpolator);
}
}
return interpolator.randomValue();
}
static private class NoValueToMakeUpRuntime extends IllegalArgumentException {
static final long serialVersionUID = 1L;
NoValueToMakeUpRuntime() {
super();
}
NoValueToMakeUpRuntime(String detailMessage) {
super(detailMessage);
}
NoValueToMakeUpRuntime(String detailMessage, Throwable cause) {
super(detailMessage, cause);
}
NoValueToMakeUpRuntime(Throwable cause) {
super(cause);
}
}
private State makeUpState(int taskAttemptNumber, double[] numAttempts) {
    // if numAttempts == null, we return FAILED.
    if (numAttempts == null) {
return State.FAILED;
}
if (taskAttemptNumber >= numAttempts.length - 1) {
// always succeed
return State.SUCCEEDED;
} else {
double pSucceed = numAttempts[taskAttemptNumber];
double pFail = 0;
for (int i = taskAttemptNumber + 1; i < numAttempts.length; i++) {
pFail += numAttempts[i];
}
return (random.nextDouble() < pSucceed / (pSucceed + pFail)) ? State.SUCCEEDED
: State.FAILED;
}
}
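  // Illustrative example of the state selection above (the distribution is
  // hypothetical): with numAttempts = { 0.7, 0.2, 0.1 }, attempt 0 succeeds
  // with probability 0.7 / (0.7 + 0.2 + 0.1) = 0.7, attempt 1 with
  // 0.2 / (0.2 + 0.1) (about 0.67), and any attempt with index >= 2 always
  // succeeds.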
private TaskID getMaskedTaskID(TaskType taskType, int taskNumber) {
return new TaskID(new JobID(), taskType, taskNumber);
}
private LoggedTask getLoggedTask(TaskType taskType, int taskNumber) {
buildMaps();
return loggedTaskMap.get(getMaskedTaskID(taskType, taskNumber));
}
private LoggedTaskAttempt getLoggedTaskAttempt(TaskType taskType,
int taskNumber, int taskAttemptNumber) {
buildMaps();
TaskAttemptID id =
new TaskAttemptID(getMaskedTaskID(taskType, taskNumber),
taskAttemptNumber);
return loggedTaskAttemptMap.get(id);
}
}
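// Illustrative usage sketch. The enclosing class name is not visible in this
// excerpt; it is assumed below to be Rumen's ZombieJob, and getRuntime() on
// TaskAttemptInfo is likewise an assumption. Kept inside a comment so the
// file compiles unchanged:
//
//   ZombieJob story = ...; // e.g. obtained from a ZombieJobProducer
//   TaskAttemptInfo plain =
//       story.getTaskAttemptInfo(TaskType.MAP, 0, 0);
//   TaskAttemptInfo adjusted =
//       story.getMapTaskAttemptInfoAdjusted(0, 0, 1); // simulate rack-local
//   System.out.println(plain.getRuntime() + " vs " + adjusted.getRuntime());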
| 33,581 | 34.763578 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/DeskewedJobTraceReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen;
import java.io.Closeable;
import java.io.IOException;
import java.io.Serializable;
import java.util.Comparator;
import java.util.Iterator;
import java.util.PriorityQueue;
import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
public class DeskewedJobTraceReader implements Closeable {
// underlying engine
private final JobTraceReader reader;
// configuration variables
private final int skewBufferLength;
private final boolean abortOnUnfixableSkew;
// state variables
private long skewMeasurementLatestSubmitTime = Long.MIN_VALUE;
private long returnedLatestSubmitTime = Long.MIN_VALUE;
private int maxSkewBufferNeeded = 0;
  // A submit time will NOT be in countedRepeatedSubmitTimesSoFar if it only
  // occurs once; that case is represented by the time being present in
  // submitTimesSoFar only. A submit time that occurs twice or more appears
  // both in countedRepeatedSubmitTimesSoFar (mapped to its occurrence count)
  // and in submitTimesSoFar.
private TreeMap<Long, Integer> countedRepeatedSubmitTimesSoFar =
new TreeMap<Long, Integer>();
private TreeSet<Long> submitTimesSoFar = new TreeSet<Long>();
private final PriorityQueue<LoggedJob> skewBuffer;
static final private Log LOG =
LogFactory.getLog(DeskewedJobTraceReader.class);
static private class JobComparator implements Comparator<LoggedJob>,
Serializable {
@Override
public int compare(LoggedJob j1, LoggedJob j2) {
return (j1.getSubmitTime() < j2.getSubmitTime()) ? -1 : (j1
.getSubmitTime() == j2.getSubmitTime()) ? 0 : 1;
}
}
  /**
   * Constructor.
   *
   * @param reader
   *          the {@link JobTraceReader} that's being protected
   * @param skewBufferLength
   *          the number of jobs with later submit times that may precede an
   *          out-of-order earlier job while still allowing it to be reordered
   * @param abortOnUnfixableSkew
   *          whether to throw an {@link OutOfOrderException} when a skew that
   *          the buffer cannot fix is encountered
   * @throws IOException
   */
public DeskewedJobTraceReader(JobTraceReader reader, int skewBufferLength,
boolean abortOnUnfixableSkew) throws IOException {
this.reader = reader;
this.skewBufferLength = skewBufferLength;
this.abortOnUnfixableSkew = abortOnUnfixableSkew;
skewBuffer =
new PriorityQueue<LoggedJob>(skewBufferLength + 1, new JobComparator());
fillSkewBuffer();
}
public DeskewedJobTraceReader(JobTraceReader reader) throws IOException {
this(reader, 0, true);
}
private LoggedJob rawNextJob() throws IOException {
LoggedJob result = reader.getNext();
if ((!abortOnUnfixableSkew || skewBufferLength > 0) && result != null) {
long thisTime = result.getSubmitTime();
if (submitTimesSoFar.contains(thisTime)) {
Integer myCount = countedRepeatedSubmitTimesSoFar.get(thisTime);
countedRepeatedSubmitTimesSoFar.put(thisTime, myCount == null ? 2
: myCount + 1);
} else {
submitTimesSoFar.add(thisTime);
}
if (thisTime < skewMeasurementLatestSubmitTime) {
Iterator<Long> endCursor = submitTimesSoFar.descendingIterator();
int thisJobNeedsSkew = 0;
Long keyNeedingSkew;
while (endCursor.hasNext()
&& (keyNeedingSkew = endCursor.next()) > thisTime) {
Integer keyNeedsSkewAmount =
countedRepeatedSubmitTimesSoFar.get(keyNeedingSkew);
thisJobNeedsSkew +=
keyNeedsSkewAmount == null ? 1 : keyNeedsSkewAmount;
}
maxSkewBufferNeeded = Math.max(maxSkewBufferNeeded, thisJobNeedsSkew);
}
skewMeasurementLatestSubmitTime =
Math.max(thisTime, skewMeasurementLatestSubmitTime);
}
return result;
}
static class OutOfOrderException extends RuntimeException {
static final long serialVersionUID = 1L;
public OutOfOrderException(String text) {
super(text);
}
}
LoggedJob nextJob() throws IOException, OutOfOrderException {
LoggedJob newJob = rawNextJob();
if (newJob != null) {
skewBuffer.add(newJob);
}
LoggedJob result = skewBuffer.poll();
while (result != null && result.getSubmitTime() < returnedLatestSubmitTime) {
LOG.error("The current job was submitted earlier than the previous one");
LOG.error("Its jobID is " + result.getJobID());
LOG.error("Its submit time is " + result.getSubmitTime()
+ ",but the previous one was " + returnedLatestSubmitTime);
if (abortOnUnfixableSkew) {
throw new OutOfOrderException("Job submit time is "
+ result.getSubmitTime() + ",but the previous one was "
+ returnedLatestSubmitTime);
}
result = rawNextJob();
}
if (result != null) {
returnedLatestSubmitTime = result.getSubmitTime();
}
return result;
}
private void fillSkewBuffer() throws IOException {
for (int i = 0; i < skewBufferLength; ++i) {
LoggedJob newJob = rawNextJob();
if (newJob == null) {
return;
}
skewBuffer.add(newJob);
}
}
int neededSkewBufferSize() {
return maxSkewBufferNeeded;
}
@Override
public void close() throws IOException {
reader.close();
}
}
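// Illustrative usage sketch (the path, configuration and buffer size are
// assumptions), kept inside a comment so the file compiles unchanged:
//
//   JobTraceReader raw = new JobTraceReader(tracePath, conf);
//   DeskewedJobTraceReader deskewed =
//       new DeskewedJobTraceReader(raw, 100, false);
//   try {
//     for (LoggedJob job = deskewed.nextJob(); job != null;
//          job = deskewed.nextJob()) {
//       // jobs arrive here in non-decreasing submit-time order
//     }
//   } finally {
//     deskewed.close();
//   }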
| 6,012 | 28.767327 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordList.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.anonymization;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.tools.rumen.state.State;
/**
* Represents the list of words used in list-backed anonymizers.
*/
public class WordList implements State {
private Map<String, Integer> list = new HashMap<String, Integer>(0);
private boolean isUpdated = false;
private String name;
public WordList() {
this("word");
}
public WordList(String name) {
this.name = name;
}
@Override
public String getName() {
return name;
}
/**
* Adds the specified word to the list if the word is not already added.
*/
public void add(String word) {
if (!contains(word)) {
int index = getSize();
list.put(word, index);
isUpdated = true;
}
}
/**
* Returns 'true' if the list contains the specified word.
*/
public boolean contains(String word) {
return list.containsKey(word);
}
/**
* Returns the index of the specified word in the list.
*/
public int indexOf(String word) {
return list.get(word);
}
/**
* Returns the size of the list.
*/
public int getSize() {
return list.size();
}
/**
   * Returns 'true' if the list is updated since creation (or reload).
*/
@Override
public boolean isUpdated() {
return isUpdated;
}
/**
* Setters and getters for Jackson JSON
*/
/**
* Sets the size of the list.
*
* Note: That this API is only for Jackson JSON deserialization.
*/
public void setSize(int size) {
list = new HashMap<String, Integer>(size);
}
/**
* Note: That this API is only for Jackson JSON deserialization.
*/
@Override
public void setName(String name) {
this.name = name;
}
/**
* Gets the words.
*
* Note: That this API is only for Jackson JSON serialization.
*/
public Map<String, Integer> getWords() {
return list;
}
/**
* Sets the words.
*
* Note: That this API is only for Jackson JSON deserialization.
*/
public void setWords(Map<String, Integer> list) {
this.list = list;
}
}
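// Illustrative usage sketch (the words below are hypothetical), kept inside a
// comment:
//
//   WordList users = new WordList("user");
//   users.add("alice"); // stored at index 0
//   users.add("bob"); // stored at index 1
//   users.add("alice"); // no-op, already present
//   int index = users.indexOf("bob"); // 1
//   boolean dirty = users.isUpdated(); // true, until persisted and reloaded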
| 2,921 | 22.564516 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/DataAnonymizer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.anonymization;
import org.apache.hadoop.tools.rumen.state.State;
/**
* The data anonymizer interface.
*/
public interface DataAnonymizer<T> {
T anonymize(T data, State state);
}
| 1,029 | 35.785714 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordListAnonymizerUtility.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.anonymization;
import org.apache.commons.lang.StringUtils;
/**
* Utility class to handle commonly performed tasks in a
* {@link org.apache.hadoop.tools.rumen.datatypes.DefaultAnonymizableDataType}
* using a {@link WordList} for anonymization.
 * TODO: there is no caching for saving memory.
*/
public class WordListAnonymizerUtility {
static final String[] KNOWN_WORDS =
new String[] {"job", "tmp", "temp", "home", "homes", "usr", "user", "test"};
/**
   * Checks if the data needs anonymization. Typically, data that is numeric
   * in nature doesn't need anonymization.
*/
public static boolean needsAnonymization(String data) {
// Numeric data doesn't need anonymization
    // Currently this doesn't support inputs like
// - 12.3
// - 12.3f
// - 90L
// - 1D
if (StringUtils.isNumeric(data)) {
return false;
}
return true; // by default return true
}
/**
* Checks if the given data has a known suffix.
*/
public static boolean hasSuffix(String data, String[] suffixes) {
// check if they end in known suffixes
for (String ks : suffixes) {
if (data.endsWith(ks)) {
return true;
}
}
return false;
}
/**
* Extracts a known suffix from the given data.
*
* @throws RuntimeException if the data doesn't have a suffix.
* Use {@link #hasSuffix(String, String[])} to make sure that the
* given data has a suffix.
*/
public static String[] extractSuffix(String data, String[] suffixes) {
// check if they end in known suffixes
String suffix = "";
for (String ks : suffixes) {
if (data.endsWith(ks)) {
suffix = ks;
        // strip off the suffix; it will be appended back later
data = data.substring(0, data.length() - suffix.length());
return new String[] {data, suffix};
}
}
// throw exception
throw new RuntimeException("Data [" + data + "] doesn't have a suffix from"
+ " known suffixes [" + StringUtils.join(suffixes, ',') + "]");
}
/**
* Checks if the given data is known. This API uses {@link #KNOWN_WORDS} to
* detect if the given data is a commonly used (so called 'known') word.
*/
public static boolean isKnownData(String data) {
return isKnownData(data, KNOWN_WORDS);
}
/**
* Checks if the given data is known.
*/
public static boolean isKnownData(String data, String[] knownWords) {
// check if the data is known content
//TODO [Chunking] Do this for sub-strings of data
for (String kd : knownWords) {
if (data.equals(kd)) {
return true;
}
}
return false;
}
}
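// Illustrative usage sketch (the suffix list and input are hypothetical), kept
// inside a comment:
//
//   String[] suffixes = { ".gz", ".tmp" };
//   String data = "part-00000.gz";
//   if (WordListAnonymizerUtility.needsAnonymization(data)
//       && WordListAnonymizerUtility.hasSuffix(data, suffixes)) {
//     String[] split = WordListAnonymizerUtility.extractSuffix(data, suffixes);
//     // split[0] is "part-00000" (to be anonymized), split[1] is ".gz"
//   }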
| 3,550 | 31.281818 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/StateDeserializer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.state;
import java.io.IOException;
import org.apache.hadoop.tools.rumen.state.StatePool.StatePair;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.JsonProcessingException;
import org.codehaus.jackson.map.DeserializationContext;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.deser.std.StdDeserializer;
import org.codehaus.jackson.node.ObjectNode;
/**
* Rumen JSON deserializer for deserializing the {@link State} object.
*/
public class StateDeserializer extends StdDeserializer<StatePair> {
public StateDeserializer() {
super(StatePair.class);
}
@Override
public StatePair deserialize(JsonParser parser,
DeserializationContext context)
throws IOException, JsonProcessingException {
ObjectMapper mapper = (ObjectMapper) parser.getCodec();
// set the state-pair object tree
ObjectNode statePairObject = (ObjectNode) mapper.readTree(parser);
Class<?> stateClass = null;
try {
stateClass =
Class.forName(statePairObject.get("className").getTextValue().trim());
} catch (ClassNotFoundException cnfe) {
throw new RuntimeException("Invalid classname!", cnfe);
}
String stateJsonString = statePairObject.get("state").toString();
State state = (State) mapper.readValue(stateJsonString, stateClass);
return new StatePair(state);
}
}
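// Illustrative registration sketch, kept inside a comment. It assumes the
// Jackson 1.x SimpleModule/Version API available to this code base:
//
//   ObjectMapper mapper = new ObjectMapper();
//   SimpleModule module = new SimpleModule("state", new Version(0, 1, 0, null));
//   module.addDeserializer(StatePair.class, new StateDeserializer());
//   mapper.registerModule(module);
//   StatePair pair = mapper.readValue(stateFile, StatePair.class);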
| 2,253 | 37.20339 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/State.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.rumen.state;
import org.codehaus.jackson.annotate.JsonIgnore;
/**
* Represents a state. This state is managed by {@link StatePool}.
*
 * Note that {@link State} objects should be persistable. Currently, the
* {@link State} objects are persisted using the Jackson JSON library. Hence the
* implementors of the {@link State} interface should be careful while defining
* their public setter and getter APIs.
*/
public interface State {
/**
* Returns true if the state is updated since creation (or reload).
*/
@JsonIgnore
boolean isUpdated();
/**
* Get the name of the state.
*/
public String getName();
/**
* Set the name of the state.
*/
public void setName(String name);
}
| 1,565 | 32.319149 | 80 |
java
|